[Dataset viewer header: columns are code (string, 82–53.2k chars), code_codestyle (int, 0–721), style_context (string, 91–41.9k chars), style_context_codestyle (int, 0–699), label (int, 0–1).]
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
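# Usage note (illustrative): in a transformers checkout a module like this is run
# with pytest, e.g. `python -m pytest tests/models/xglm/test_tokenization_xglm.py`;
# the @slow tests above are skipped unless the RUN_SLOW=1 environment variable is set.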
def solution(limit: int = 1_000_000) -> int:
    """Return the number of reduced proper fractions n/d with d <= limit
    (Project Euler 72), i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit."""
    # Sieve of Eratosthenes over the odd numbers.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Multiplicative formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a gzip file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        #   File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        #   AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
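# A minimal usage sketch for the classes above (illustrative; exact fsspec behavior
# can vary by version). It writes a small gzip file, then reads it back through the
# single-file "archive" filesystem:
#
#   import gzip
#
#   with gzip.open("data.txt.gz", "wb") as f:  # hypothetical local path
#       f.write(b"hello")
#   fs = GzipFileSystem(fo="data.txt.gz")
#   with fs.open(fs.uncompressed_name, "rb") as f:
#       assert f.read() == b"hello"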
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Instantiate an OwlViTConfig from owlvit text and vision configuration dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
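# A minimal sketch of composing the configs above (values shown are the defaults
# from the signatures; note that `from_text_vision_configs` takes plain dicts here):
#
#   text_cfg = OwlViTTextConfig(vocab_size=49408, hidden_size=512)
#   vision_cfg = OwlViTVisionConfig(hidden_size=768, image_size=768, patch_size=32)
#   cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
#   assert cfg.projection_dim == 512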
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
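# Example invocation (all paths are illustrative):
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sst-2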
import re


def split_input(str_):
    """Split on any character that is not alphanumeric or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text, upper, separator):
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    return to_simple_case(text)


def to_camel_case(text):
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
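# Illustrative outputs, derived from the implementations above:
#   to_pascal_case("one two 31235three4four")       -> "OneTwo31235three4four"
#   to_camel_case("one two 31235three4four")        -> "oneTwo31235three4four"
#   to_snake_case("one two 31235three4four", True)  -> "ONE_TWO_31235THREE4FOUR"
#   to_kebab_case("one two 31235three4four", False) -> "one-two-31235three4four"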
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
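# A minimal usage sketch (the checkpoint name is the DiT release commonly used with
# this pipeline; treat it and the label strings as illustrative):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images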
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
        result = get_results(tmp_dir)
        self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
        result = get_results(tmp_dir, split="test")
        self.assertGreaterEqual(result["test_rouge1"], 10)
        self.assertGreaterEqual(result["test_rouge2"], 2)
        self.assertGreaterEqual(result["test_rougeL"], 7)
        self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
        result = get_results(tmp_dir)
        self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with a small dataset, more epochs are needed on multi-GPU to reach the threshold
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_f1"], 30)
        self.assertGreaterEqual(result["eval_exact"], 30)
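# Note: these tests shell out to the Flax example scripts added to sys.path above;
# the @slow ones only run with transformers' usual switch, e.g. RUN_SLOW=1 pytest.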
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # the main process pads with one extra element so that padding actually happens
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # runs only on two processes, matching the hard-coded expected values below
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # runs only on two processes, matching the hard-coded expected values below
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # for xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
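# A script like this must be launched once per process so that gather/broadcast/
# reduce have peers to talk to, e.g. (illustrative):
#   accelerate launch --num_processes 2 test_ops.py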
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
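# Quick sanity check of the layer-count mapping built above (with the defaults):
#   config = LxmertConfig()
#   assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}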
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_count = count_parameters(hf_model.state_dict())
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
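# Example invocation (all paths are illustrative):
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava_full.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf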
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
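# With python-fire, the function doubles as a CLI (file names are illustrative):
#   python rouge_cli.py predictions.txt references.txt --save_path=metrics.json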
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    r"""
    Constructs a Bark processor which wraps a text tokenizer and optional Bark voice presets into a single processor.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not "
                    "exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to "
                    "the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` "
                    f"does not exist, no preloaded voice preset will be used - Make sure to provide correct paths "
                    f"to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
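# A minimal usage sketch (checkpoint and preset names follow the conventions this
# processor expects but are illustrative):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` now holds the padded token ids plus a "history_prompt" BatchFeature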
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
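# Illustrative feature declarations (assuming the public `datasets` API):
#   from datasets import Features
#   features = Features({"translation": Translation(languages=["en", "fr"])})
#   # or, when examples may carry any subset of languages:
#   features = Features({"translation": TranslationVariableLanguages(languages=["de", "en", "fr"])})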
# flake8: noqa
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self ):
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 205 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
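# Zero-shot classification recasts each (sequence, candidate label) pair as an NLI premise/hypothesis pair.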
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):
"""simple docstring"""
    def _parse_labels(self , labels ):
        """simple docstring"""
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split("," ) if label.strip()]
        return labels
    def __call__(self , sequences , labels , hypothesis_template ):
        """simple docstring"""
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):
"""simple docstring"""
    def __init__(self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        """simple docstring"""
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
    def entailment_id(self ):
"""simple docstring"""
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("entail" ):
return ind
return -1
    def _parse_and_tokenize(self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        """simple docstring"""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self , **kwargs ):
        """simple docstring"""
        if kwargs.get("multi_class" , None ) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self , sequences , *args , **kwargs , ):
        """simple docstring"""
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(F"""Unable to understand extra arguments {args}""" )
        return super().__call__(sequences , **kwargs )
    def preprocess(self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        """simple docstring"""
        sequence_pairs, sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward(self , inputs ):
        """simple docstring"""
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self , model_outputs , multi_label=False ):
        """simple docstring"""
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        # one row per input sequence, one column per candidate label, last axis = the model's own labels
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        # rank candidate labels from most to least probable
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 418 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
    def test_small_model_pt(self ):
        """simple docstring"""
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
        dataset = load_dataset("""ashraq/esc50""" )
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
        self.assertEqual(
            nested_simplify(output ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
    def test_small_model_tf(self ):
"""simple docstring"""
pass
@slow
@require_torch
    def test_large_model_pt(self ):
        """simple docstring"""
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
        # This is an audio of a dog
        dataset = load_dataset("""ashraq/esc50""" )
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"""score""": 0.999, """label""": """Sound of a dog"""},
                {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.999, """label""": """Sound of a dog"""},
                    {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.999, """label""": """Sound of a dog"""},
                    {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
                ],
            ]
            * 5 , )
@unittest.skip("""No models are available in TF""" )
    def test_large_model_tf(self ):
"""simple docstring"""
pass
| 165 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
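# The defaults below correspond to the tiiuae/falcon-7b checkpoint (hidden_size=4544, 71 heads, 32 layers).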
class FalconConfig(PretrainedConfig ):
    model_type = """falcon"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim( self ):
        # Per-head width; assumes hidden_size is a multiple of num_attention_heads.
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary( self ):
        # Rotary position embeddings are used exactly when ALiBi is disabled.
        return not self.alibi
| 34 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
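# parameterized_class runs the full test class once for each configuration above.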
class lowercase__ ( unittest.TestCase):
    def setUp( self ):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )
    def create_estimator( self , instance_count ):
        '''simple docstring'''
        job_name = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )
    def save_results_as_csv( self , job_name ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
    def test_script( self , instance_count ):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 34 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
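# UperNet pairs a swappable vision backbone with pyramid pooling (PSP) and FPN decode heads for semantic segmentation.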
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule(nn.Module ):
    '''simple docstring'''
    def __init__(self , in_channels , out_channels , kernel_size , padding = 0 , bias = False , dilation = 1 ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()
    def forward(self , input ) -> torch.Tensor:
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class UperNetPyramidPoolingBlock(nn.Module ):
    '''simple docstring'''
    def __init__(self , pool_scale , in_channels , channels ) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )
    def forward(self , input ) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module ):
    '''simple docstring'''
    def __init__(self , pool_scales , in_channels , channels , align_corners ) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward(self , x ) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
class UperNetHead(nn.Module ):
    '''simple docstring'''
    def __init__(self , config , in_channels ) -> None:
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights(self ):
        self.apply(self._init_weights )
    def _init_weights(self , module ):
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self , inputs ):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward(self , encoder_hidden_states ) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode="""bilinear""" , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class UperNetFCNHead(nn.Module ):
    '''simple docstring'''
    def __init__(self , config , in_index = 2 , kernel_size = 3 , dilation = 1 ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights(self ):
        self.apply(self._init_weights )
    def _init_weights(self , module ):
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self , encoder_hidden_states ) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel(PreTrainedModel ):
    '''simple docstring'''
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self , module ):
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights(self ):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing(self , module , value=False ):
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel ):
'''simple docstring'''
    def __init__(self , config ):
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward(self , pixel_values = None , output_attentions = None , output_hidden_states = None , labels = None , return_dict = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("""The number of labels should be greater than one""" )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 585 |
'''simple docstring'''
class Node:
    def __init__(self , name , val ):
        self.name = name
        self.val = val
    def __str__(self ):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""
    def __lt__(self , other ):
        return self.val < other.val
class MinHeap:
    def __init__(self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__(self , key ):
        return self.get_value(key )
    def get_parent_idx(self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx(self , idx ):
        return idx * 2 + 1
    def get_right_child_idx(self , idx ):
        return idx * 2 + 2
    def get_value(self , key ):
        return self.heap_dict[key]
    def build_heap(self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # heapify from the last parent down to the root
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down(self , idx , array ):
        # move the node down until both children are larger (min-heap property)
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up(self , idx ):
        # move the node up while it is smaller than its parent
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek(self ):
        return self.heap[0]
    def remove(self ):
        # swap the root with the last element, pop it, then restore the heap property
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert(self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty(self ):
        return len(self.heap ) == 0
    def decrease_key(self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 585 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem ):
    """simple docstring"""
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , fo = "" , target_protocol = None , target_options = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode='rb' , protocol=target_protocol , compression=self.compression , client_kwargs={
                'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('::' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.' )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None  # filled lazily by _get_dirs() on the first listing
@classmethod
    def _strip_protocol(cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip('/' )
    def _get_dirs(self ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}
    def cat(self , path ):
        return self.file.open().read()
def _a ( self , A_ , A_ = "rb" , A_=None , A_=True , A_=None , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =self._strip_protocol(A_ )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem ):
    """simple docstring"""
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__( self , fo , mode = "rb" , target_protocol = None , target_options = None , block_size = DEFAULT_BLOCK_SIZE , **kwargs , ):
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            """simple docstring"""
            def __init__( self , file_ ):
                self._file = file_
            def __enter__( self ):
                self._file.__enter__()
                return self
            def __exit__( self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )
            def __iter__( self ):
                return iter(self._file )
            def __next__( self ):
                return next(self._file )
            def __getattr__( self , attr ):
                return getattr(self._file , attr )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter
| 682 |
import math
from collections.abc import Callable
def intersection(function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        # secant step: x-intercept of the line through (x_n, f(x_n)) and (x_n1, f(x_n1))
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x : float ) -> float:
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
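    # should print a value close to 2.0945, the real root of x**3 - 2*x - 5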
| 682 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""Salesforce/codegen-350M-mono""": 2_048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop('''add_bos_token''' , False ):
            model_id = kwargs.pop('''name_or_path''' , '''''' )
            raise ValueError(
                '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , truncate_before_pattern = None , **kwargs , ):
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text
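    # truncate() trims a generated completion at natural stopping points: the second top-level
    # print/def statement, or the earliest match of a caller-supplied terminal pattern.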
    def truncate( self , completion , truncate_before_pattern ):
        def find_re(string , pattern , start_pos ):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer('''^print''' , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('''^def''' , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
| 234 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
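# A processor bundles the feature extractor (audio inputs) and the tokenizer (text labels) behind one API.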
class Speech2TextProcessor(ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 234 | 1 |
'''simple docstring'''
def binary_xor(a : int , b : int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
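# Example: binary_xor(25, 32) returns "0b111001" (0b011001 XOR 0b100000).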
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb : np.ndarray ) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray : np.ndarray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation(image : np.ndarray , kernel : np.ndarray ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
__UpperCAmelCase = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
__UpperCAmelCase = np.array(Image.open(lena_path))
# kernel to be applied
__UpperCAmelCase = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
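# a 3x3 cross-shaped structuring element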
__UpperCAmelCase = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__UpperCAmelCase = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
| 220 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
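# fairseq language-code token ids in the MBart vocabulary: en_XX -> 250004, ro_RO -> 250020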
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self ):
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
    def setUpClass(cls ):
        """simple docstring"""
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
    def test_enro_tokenizer_batch_encode_plus(self ):
        """simple docstring"""
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_enro_tokenizer_decode_ignores_language_codes(self ):
        """simple docstring"""
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_enro_tokenizer_truncation(self ):
        """simple docstring"""
        src_text = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__SCREAMING_SNAKE_CASE : str = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
__SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE : Any = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=1_0 , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE : List[str] = targets["""input_ids"""]
__SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
            } , )
| 578 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A = "naver-clova-ix/donut-base-finetuned-docvqa"
_A = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_A = "document_qa"
_A = AutoProcessor
_A = VisionEncoderDecoderModel
_A = ["image", "text"]
_A = ["text"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : str ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : "Image" , SCREAMING_SNAKE_CASE_ : str ):
_a = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
_a = task_prompt.replace('{user_input}' , SCREAMING_SNAKE_CASE_ )
_a = self.pre_processor.tokenizer(
SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_ids
_a = self.pre_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=SCREAMING_SNAKE_CASE_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=SCREAMING_SNAKE_CASE_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=SCREAMING_SNAKE_CASE_ , ).sequences
def _UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
_a = self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE_ )[0]
_a = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
_a = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
_a = re.sub(R'<.*?>' , '' , SCREAMING_SNAKE_CASE_ , count=1 ).strip() # remove first task start token
        _a = self.pre_processor.token2json(SCREAMING_SNAKE_CASE_ )
return sequence["answer"]
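# Hedged usage sketch. `PipelineTool.__call__` chains the three methods above
# (upstream they are named `encode`, `forward`, and `decode`); the class name and
# input file below are illustrative, not taken from this snippet:
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")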
| 562 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
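# Hedged example of the replacement import named in the warning above
# (the checkpoint name is illustrative):
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")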
| 706 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
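    # Hedged cross-check via the DFT identity: the circular convolution of x and h
    # equals IFFT(FFT(x) * FFT(h)). Both routes give [10.0, 10.0, 6.0, 14.0] for the
    # default signals used above.
    x = np.array([2, 1, 2, -1])
    h = np.array([1, 2, 3, 4])
    print(np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h))).round(2))
    print(CircularConvolution().circular_convolution())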
| 92 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
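# Hedged usage sketch: this class resembles the community "seed_resize_stable_diffusion"
# pipeline, so loading would look roughly like the following (checkpoint name illustrative):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="seed_resize_stable_diffusion"
#   )
#   image = pipe(prompt="an astronaut riding a horse", height=512, width=768).images[0]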
| 42 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
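    # Hedged worked example: integrate dy/dx = y on [0, 1] with y(0) = 1.
    # Heun's method is second order, so with step 1e-3 the endpoint should be
    # very close to e ~ 2.71828.
    ys = heun(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    print(ys[-1])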
| 608 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 403 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
a_ = argparse.ArgumentParser(add_help=UpperCamelCase , allow_abbrev=UpperCamelCase )
# The main config parser
a_ = config_command_parser(UpperCamelCase )
# The subparser to add commands to
a_ = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" )
# Then add other parsers with the parent parser
default_command_parser(UpperCamelCase , parents=[parent_parser] )
update_command_parser(UpperCamelCase , parents=[parent_parser] )
return config_parser
def __SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
a_ = get_config_parser()
a_ = config_parser.parse_args()
if not hasattr(UpperCamelCase , """func""" ):
config_parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase )
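# Hedged sketch of the resulting CLI surface (subcommand names follow the parsers above):
#   accelerate config           # run the interactive configuration questionnaire
#   accelerate config default   # write out a default config file
#   accelerate config update    # update an existing config file in place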
if __name__ == "__main__":
    main()
| 403 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ ( a_ ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : List[Any]=37 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=5_12 , __SCREAMING_SNAKE_CASE : List[Any]=16 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[Any]="None" , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : List[Any]=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = relative_attention
__a = position_biased_input
__a = pos_att_type
__a = scope
def _UpperCAmelCase ( self : str ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : List[str] ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCAmelCase ( self : List[str] ):
__a = self.get_config()
__a = 3_00
return config
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ):
__a = DebertaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__a = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__a = model(__SCREAMING_SNAKE_CASE )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCAmelCase ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ):
__a = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ):
__a = self.num_labels
__a = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ):
__a = self.num_labels
__a = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
__a = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self : str ):
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ ( a_ , a_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _UpperCAmelCase ( self : int ):
__a = DebertaModelTester(self )
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _UpperCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[Any] ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Tuple ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[int] ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : str ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Union[str, Any] ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : List[str] ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _UpperCAmelCase ( self : Tuple ):
pass
@slow
def _UpperCAmelCase ( self : List[str] ):
__a = DebertaModel.from_pretrained("microsoft/deberta-base" )
__a = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
__a = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
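# Hedged usage sketch mirroring the integration test above:
#   model = DebertaModel.from_pretrained("microsoft/deberta-base")
#   outputs = model(input_ids, attention_mask=attention_mask)[0]  # last hidden states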
| 197 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __A ( _A , _A ):
"""simple docstring"""
__a = torch.load(_A , map_location="cpu" )
__a = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
__a = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__a = v
else:
__a = v
__a = chkpt["params"]
__a = {n: v for n, v in config.items() if not isinstance(_A , (torch.FloatTensor, numpy.ndarray) )}
__a = chkpt["dico_word2id"]
__a = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
__a = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__a = pytorch_dump_folder_path + "/" + CONFIG_NAME
__a = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(_A , _A )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , indent=2 ) + "\n" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , indent=2 ) + "\n" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
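    # Hedged example invocation (script and path names are illustrative):
    #   python convert_xlm_checkpoint.py \
    #       --xlm_checkpoint_path ./mlm_en_2048.pth \
    #       --pytorch_dump_folder_path ./converted-xlm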
| 197 | 1 |
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed=None) -> str:
    """simple docstring"""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name='BB84')
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, '0')
    return key
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
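    # Hedged sanity check (runs the qiskit simulator): the key is seeded, hence
    # reproducible, always `key_len` characters long, and made up of bits only.
    key = bb84(key_len=16, seed=0)
    assert len(key) == 16 and set(key) <= {"0", "1"}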
| 717 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ["pixel_values"]
def __init__( self : Optional[int] , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Dict[str, int]] = None , lowerCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , **lowerCamelCase_ : int , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
_lowercase : Optional[Any] = size if size is not None else {'shortest_edge': 2_5_6}
_lowercase : Optional[int] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
_lowercase : List[str] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
_lowercase : Any = get_size_dict(lowerCamelCase_ )
_lowercase : Optional[int] = do_resize
_lowercase : Optional[int] = size
_lowercase : Union[str, Any] = resample
_lowercase : Optional[Any] = do_center_crop
_lowercase : Union[str, Any] = crop_size
_lowercase : Any = do_rescale
_lowercase : Tuple = rescale_factor
_lowercase : Optional[int] = do_normalize
_lowercase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Tuple , ):
"""simple docstring"""
_lowercase : Optional[Any] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowercase : Dict = get_resize_output_image_size(lowerCamelCase_ , size=size['shortest_edge'] , default_to_square=lowerCamelCase_ )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : str , ):
"""simple docstring"""
_lowercase : Tuple = get_size_dict(lowerCamelCase_ )
return center_crop(lowerCamelCase_ , size=(size['height'], size['width']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : float , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Union[float, List[float]] , lowerCamelCase_ : Union[float, List[float]] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : List[Any] , ):
"""simple docstring"""
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : ImageInput , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : PILImageResampling = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[float] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCamelCase_ : int , ):
"""simple docstring"""
_lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_lowercase : List[Any] = size if size is not None else self.size
_lowercase : Union[str, Any] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
_lowercase : Any = resample if resample is not None else self.resample
_lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowercase : Tuple = get_size_dict(lowerCamelCase_ )
_lowercase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : int = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : str = image_mean if image_mean is not None else self.image_mean
_lowercase : List[Any] = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowercase : Optional[Any] = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
_lowercase : Optional[Any] = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
_lowercase : List[Any] = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
_lowercase : str = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
_lowercase : Any = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
_lowercase : int = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
_lowercase : Tuple = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
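# Hedged usage sketch (a PIL image is assumed; the defaults above resize the short edge
# to 256, center-crop to 224x224, rescale by 1/255, then normalize with ImageNet stats):
#   processor = <this image processor>()  # e.g. obtained via AutoImageProcessor.from_pretrained(...)
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])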
| 283 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a : Dict = logging.get_logger(__name__)
a : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a : Tuple = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a : Any = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a : Dict = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
a : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
a : int = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
a : Any = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
a : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
a : List[str] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
a : List[str] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = VOCAB_FILES_NAMES
a : Tuple = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
a : str = DPRContextEncoderTokenizer
class a ( lowercase__ ):
"""simple docstring"""
a : Union[str, Any] = VOCAB_FILES_NAMES
a : List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : List[str] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a : List[Any] = DPRQuestionEncoderTokenizer
a : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
a : Dict = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
a : Tuple = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(lowercase__ )
class a :
"""simple docstring"""
def __call__( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Union[bool, str] = False , __lowercase : Union[bool, str] = False , __lowercase : Optional[int] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Optional[bool] = None , **__lowercase : List[Any] , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , return_tensors=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
elif titles is None or texts is None:
__UpperCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
__lowercase , __lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , return_tensors=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
__UpperCAmelCase : Optional[int] = titles if not isinstance(__lowercase , __lowercase ) else [titles]
__UpperCAmelCase : Dict = texts if not isinstance(__lowercase , __lowercase ) else [texts]
__UpperCAmelCase : Union[str, Any] = len(__lowercase )
__UpperCAmelCase : Dict = questions if not isinstance(__lowercase , __lowercase ) else [questions] * n_passages
assert len(__lowercase ) == len(
__lowercase ), f"""There should be as many titles than texts but got {len(__lowercase )} titles and {len(__lowercase )} texts."""
__UpperCAmelCase : Union[str, Any] = super().__call__(__lowercase , __lowercase , padding=__lowercase , truncation=__lowercase )["""input_ids"""]
__UpperCAmelCase : Any = super().__call__(__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase )["""input_ids"""]
__UpperCAmelCase : Union[str, Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowercase , __lowercase )
]
}
if return_attention_mask is not False:
__UpperCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__UpperCAmelCase : int = attention_mask
return self.pad(__lowercase , padding=__lowercase , max_length=__lowercase , return_tensors=__lowercase )
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model, best passages first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best answer spans for one passage: at most `top_spans` non-overlapping intervals."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
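# Usage sketch (illustrative; it mirrors the example in the upstream DPR docs, and
# the checkpoint name is an assumption, not part of this file):
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love ?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#   print(predicted_spans[0].text)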
| 63 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """print under an exclusive file lock so output from multiple processes doesn't interleave"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
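# On a healthy setup each rank prints a line like the following (produced by the
# printflock calls above; the concrete version values are placeholders and the
# ordering across ranks may vary):
#
#   [hostname-0] is OK (global rank: 0/2)
#   [hostname-1] is OK (global rank: 1/2)
#   pt=<torch version>, cuda=<cuda version>, nccl=<nccl version>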
| 651 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DPR model."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 707 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # (the constant 10 matches the length of the demo list built in main())
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            right_value = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, right_value)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            left_value = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, left_value)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
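# For reference, a minimal single-process odd-even transposition sort (an
# illustrative sketch, not part of the original module); the parallel version
# built below should produce the same result:
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr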
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
    main()
| 436 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
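# Illustrative note (not part of the original module): with the _LazyModule pattern
# above, importing the package is cheap; the heavy tokenizer modules are only loaded
# on first attribute access, e.g.:
#
#   from transformers.models.nllb import NllbTokenizer   # triggers the real import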
| 20 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
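# Example invocation (script name and paths are placeholders, not from the original file):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/rembert/model.ckpt \
#     --rembert_config_file /path/to/rembert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin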
| 53 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
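# Example invocation (script name and paths are placeholders, not from the original file):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/xlnet/model.ckpt \
#     --xlnet_config_file /path/to/xlnet_config.json \
#     --pytorch_dump_folder_path /path/to/output \
#     --finetuning_task sst-2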
| 231 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
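# Minimal usage sketch (illustrative) of the API exercised by the tests above:
#
#   act = get_activation("gelu_new")
#   out = act(torch.randn(4))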
| 231 | 1 |
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x) so that huge powers can be
        # compared without actually computing them.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
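# Quick sanity check (illustrative): 2^10 = 1024 beats 10^3 = 1000, and indeed
# res(2, 10) = 10 * log10(2) ≈ 3.0103 > res(10, 3) = 3 * log10(10) = 3.0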
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
| 562 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
    # used during training (even though we don't have a training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
    # `norm` used in the conversion script (despite not being used in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
_a = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"""config.{attribute}""" in modeling_source
or f"""getattr(config, \"{attribute}\"""" in modeling_source
or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
_a = True
# Deal with multi-line cases
elif (
re.search(
Rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , _UpperCAmelCase , )
is not None
):
_a = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_a = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_a = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
_a = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
_a = True
if not attribute_used:
_a = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_a = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_a = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_a = True
elif attribute.endswith('_token_id' ):
_a = True
# configuration class specific cases
if not case_allowed:
_a = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
_a = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
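# Illustrative note (not part of the original script): the multi-line regex above is
# meant to catch `getattr` calls that are split across lines, e.g. source such as
#
#   value = getattr(
#       self.config, "hidden_size"
#   )
#
# matches because the character class `[ \t\v\n\r\f]` explicitly includes the newline.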
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Dict:
_a = dict(inspect.signature(config_class.__init__ ).parameters )
_a = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
_a = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_a = {}
if len(config_class.attribute_map ) > 0:
_a = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_a = inspect.getsourcefile(_UpperCAmelCase )
_a = os.path.dirname(_UpperCAmelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_a = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for fn in os.listdir(_UpperCAmelCase ) if fn.startswith('modeling_' )]
# Get the source code strings
_a = []
for path in modeling_paths:
if os.path.isfile(_UpperCAmelCase ):
with open(_UpperCAmelCase ) as fp:
modeling_sources.append(fp.read() )
_a = []
for config_param, default_value in zip(_UpperCAmelCase , _UpperCAmelCase ):
# `attributes` here is all the variant names for `config_param`
_a = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
unused_attributes.append(attributes[0] )
return sorted(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> str:
_a = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_a = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _UpperCAmelCase : inspect.isclass(_UpperCAmelCase )
and issubclass(_UpperCAmelCase , _UpperCAmelCase )
and inspect.getmodule(_UpperCAmelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
_a = check_config_attributes_being_used(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
_a = unused_attributes
if len(_UpperCAmelCase ) > 0:
_a = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += f"""{name}: {attributes}\n"""
raise ValueError(_UpperCAmelCase )
if __name__ == "__main__":
check_config_attributes()
| 562 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are ordered from the lowest degree term to the highest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2):
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2):
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2):
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at `substitution` using the stored coefficients."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        """Return the derivative as a new Polynomial of degree - 1."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0):
        """Return the antiderivative as a new Polynomial, with `constant` as the constant term."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2):
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2):
        return not self.__eq__(polynomial_2)
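# Usage sketch (illustrative, not part of the original module):
#
#   p = Polynomial(2, [1, 0, 3])   # coefficients low degree -> high, i.e. 3x^2 + 1
#   q = Polynomial(1, [0, 2])      # 2x
#   print(p + q)                   # 3x^2 + 2x + 1
#   print(p.evaluate(2))           # 3*2^2 + 1 = 13
#   print(p.derivative())          # 6x
#   print(p.integral(0))           # x^3 + x  (up to float formatting of coefficients)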
| 710 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = cn.convert_to_negative(A_ )
# assert negative_img array for at least one True
assert negative_img.any()
def snake_case_ ( ):
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(A_, 1_10 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = imread('''digital_image_processing/image_data/lena_small.jpg''', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCamelCase : Tuple = canny.canny(A_ )
# assert canny array for at least one True
assert canny_array.any()
def snake_case_ ( ):
'''simple docstring'''
assert gg.gaussian_filter(A_, 5, sigma=0.9 ).all()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_lowerCamelCase : Optional[int] = conv.img_convolve(A_, A_ ).astype(A_ )
assert res.any()
def snake_case_ ( ):
'''simple docstring'''
assert med.median_filter(A_, 3 ).any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : int = sob.sobel_filter(A_ )
assert grad.any() and theta.any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = sp.make_sepia(A_, 20 )
assert sepia.all()
def snake_case_ ( A_ : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
_lowerCamelCase : Tuple = bs.Burkes(imread(A_, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def snake_case_ ( A_ : str = "digital_image_processing/image_data/lena_small.jpg", ):
'''simple docstring'''
_lowerCamelCase : str = rs.NearestNeighbour(imread(A_, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
_lowerCamelCase : Optional[int] = imread(A_, 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : Optional[int] = image[x_coordinate][y_coordinate]
_lowerCamelCase : List[Any] = lbp.get_neighbors_pixel(
A_, A_, A_, A_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCamelCase : Any = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
_lowerCamelCase : Union[str, Any] = lbp.local_binary_value(A_, A_, A_ )
assert lbp_image.any()
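# These tests are typically run from the repository root with pytest, e.g.
# (the path below is illustrative):
#   python -m pytest digital_image_processing/test_digital_image_processing.py -v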
| 598 | 0 |
import os
import pytest
from attr import dataclass
lowercase_ = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5_500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1_000}
@property
    def metric_definitions(self):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        return F'''{self.framework}-transformers-test'''
@property
    def test_path(self) -> str:
'''simple docstring'''
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri(self) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    """class-scoped fixture that attaches a SageMakerTestEnvironment to the test class"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
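# Usage sketch (illustrative; fixture and attribute names follow the fix above):
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNode:
#       framework = "pytorch"
#
#       def test_job_name(self):
#           assert self.env.base_job_name == "pytorch-transformers-test"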
| 291 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = """gptj"""
_a = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCAmelCase=50_400 , lowerCAmelCase=2_048 , lowerCAmelCase=4_096 , lowerCAmelCase=28 , lowerCAmelCase=16 , lowerCAmelCase=64 , lowerCAmelCase=None , lowerCAmelCase="gelu_new" , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=1e-5 , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=50_256 , lowerCAmelCase=50_256 , lowerCAmelCase=False , **lowerCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
_lowercase =vocab_size
_lowercase =n_positions
_lowercase =n_embd
_lowercase =n_layer
_lowercase =n_head
_lowercase =n_inner
_lowercase =rotary_dim
_lowercase =activation_function
_lowercase =resid_pdrop
_lowercase =embd_pdrop
_lowercase =attn_pdrop
_lowercase =layer_norm_epsilon
_lowercase =initializer_range
_lowercase =use_cache
_lowercase =bos_token_id
_lowercase =eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , **lowerCAmelCase )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase , lowerCAmelCase = "default" , lowerCAmelCase = None , lowerCAmelCase = False , ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowerCAmelCase , task=lowerCAmelCase , patching_specs=lowerCAmelCase , use_past=lowerCAmelCase )
if not getattr(self._config , 'pad_token_id' , lowerCAmelCase ):
# TODO: how to do that better?
_lowercase =0
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_lowercase =OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
_lowercase ={0: 'batch', 1: 'past_sequence + sequence'}
else:
_lowercase ={0: 'batch', 1: 'sequence'}
return common_inputs
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self._config.n_head
def A__ ( self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_lowercase =super(lowerCAmelCase , self ).generate_dummy_inputs(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
        # We need to order the inputs in the way they appear in forward()
_lowercase =OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase =seqlen + 2
_lowercase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowercase =[
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
_lowercase =common_inputs['attention_mask']
if self.use_past:
_lowercase =ordered_inputs['attention_mask'].dtype
_lowercase =torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def A__ ( self ) -> int:
'''simple docstring'''
return 13
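# Illustrative sketch (not part of the original module; upstream these classes are
# named GPTJConfig / GPTJOnnxConfig): the ONNX config produces dummy inputs for export.
#
#   from transformers import AutoTokenizer
#   config = GPTJConfig()
#   onnx_config = GPTJOnnxConfig(config, task="default")
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)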
| 291 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the given sample; the protocol stub trivially returns 0.0."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
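# Usage sketch (illustrative): any object with a `process(sample) -> float` method
# satisfies the FilterType protocol, e.g. a trivial pass-through filter:
#
#   class Passthrough:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(Passthrough(), 48000)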
| 702 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=False , ):
__A : Tuple = size if size is not None else {"height": 20, "width": 20}
__A : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18}
__A : int = parent
__A : List[Any] = batch_size
__A : Tuple = num_channels
__A : Any = image_size
__A : Optional[int] = min_resolution
__A : Any = max_resolution
__A : str = do_resize
__A : Tuple = size
__A : Tuple = do_center_crop
__A : Union[str, Any] = crop_size
__A : Tuple = do_normalize
__A : Union[str, Any] = image_mean
__A : Dict = image_std
__A : Optional[Any] = do_reduce_labels
def __UpperCAmelCase( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase_ ( ) -> str:
__A : List[str] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__A : Optional[Any] = Image.open(dataset[0]["file"] )
__A : Union[str, Any] = Image.open(dataset[1]["file"] )
return image, map
def lowerCamelCase_ ( ) -> Dict:
__A : str = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__A : List[Any] = Image.open(ds[0]["file"] )
__A : Union[str, Any] = Image.open(ds[1]["file"] )
__A : Optional[Any] = Image.open(ds[2]["file"] )
__A : str = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = BeitImageProcessor if is_vision_available() else None
def __UpperCAmelCase( self ):
__A : Tuple = BeitImageProcessingTester(self )
@property
def __UpperCAmelCase( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase( self ):
__A : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "center_crop" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "image_std" ) )
def __UpperCAmelCase( self ):
__A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase )
__A : str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__UpperCAmelCase )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase )
def __UpperCAmelCase( self ):
pass
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
__A : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : Union[str, Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
__A : Tuple = []
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A : str = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
__A : Any = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
__A , __A : Optional[Any] = prepare_semantic_single_inputs()
__A : Dict = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
__A , __A : List[Any] = prepare_semantic_batch_inputs()
__A : Tuple = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__A , __A : List[Any] = prepare_semantic_single_inputs()
__A : Optional[int] = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
__A : Optional[Any] = True
__A : int = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
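# Outside the test harness the processor is used directly, e.g. (illustrative,
# argument names per the upstream BeitImageProcessor API):
#
#   from transformers import BeitImageProcessor
#   image_processor = BeitImageProcessor(do_reduce_labels=True)
#   encoding = image_processor(images=[image], segmentation_maps=[seg_map], return_tensors="pt")
#   pixel_values, labels = encoding["pixel_values"], encoding["labels"]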
| 387 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCAmelCase__:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def UpperCamelCase_ ( self ) -> List[Any]:
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : int = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
def UpperCamelCase_ ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Optional[int] = True
if model_class in get_values(__lowerCamelCase ):
continue
_SCREAMING_SNAKE_CASE : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
_SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def UpperCamelCase_ ( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : str = True
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
_SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**__lowerCamelCase ).loss
loss.backward()
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Dict = model_class(config=__lowerCamelCase )
# Skip the check for the backbone
_SCREAMING_SNAKE_CASE : List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_SCREAMING_SNAKE_CASE : str = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase_ ( self ) -> Dict:
pass
@slow
def UpperCamelCase_ ( self ) -> Tuple:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_SCREAMING_SNAKE_CASE : Optional[int] = DPTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> str:
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 249 |
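# A short usage sketch distilled from the integration test above: run the
# Intel/dpt-hybrid-midas checkpoint on a PIL image and resize the predicted depth back
# to the input resolution. The interpolation step is a common post-processing choice,
# not something the test itself performs.
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

def estimate_depth(image: Image.Image) -> torch.Tensor:
    processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
    model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        predicted_depth = model(**inputs).predicted_depth  # (1, 384, 384)
    return torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),  # add a channel dim for interpolate
        size=image.size[::-1],         # PIL size is (width, height)
        mode="bicubic",
        align_corners=False,
    ).squeeze()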
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 249 | 1 |
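# Example invocation of the conversion script above; the script filename and all paths
# below are placeholders, and --finetuning_task must be a key of GLUE_TASKS_NUM_LABELS
# (or contain "squad") to get a classification/QA head instead of the LM head:
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-large-cased-pytorch \
#       --finetuning_task sts-b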
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 244 |
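# Behavior of the guarded import above, as a short sketch: when torch or transformers
# is missing, the dummy objects are bound instead and raise a helpful error only at
# instantiation time, so the import itself always succeeds. The checkpoint id below is
# illustrative.
def load_unidiffuser():
    from diffusers import UniDiffuserPipeline  # dummy or real, depending on the environment
    return UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")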
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 244 | 1 |
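# Minimal usage sketch for the tool defined above; the image path and question are
# placeholders, and the Donut checkpoint is downloaded on first use:
from PIL import Image

tool = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")
print(tool(document, "What is the invoice number?"))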
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 493 |
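# Usage sketch for the helpers above: load the VQGAN from its default checkpoint paths
# and round-trip a batch through encode/decode. The random tensor stands in for a
# real, already-normalized image batch.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vqgan = load_vqgan(device)  # reads ./model_checkpoints/vqgan_only.yaml and .pt
with torch.no_grad():
    x = torch.randn(1, 3, 256, 256, device=device)
    x_rec = reconstruct_with_vqgan(x, vqgan)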
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
lowerCAmelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Any = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
__UpperCAmelCase : Optional[Any] = vocab_file
__UpperCAmelCase : int = False if not self.vocab_file else True
__UpperCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
__UpperCAmelCase : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(lowercase__) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__UpperCAmelCase : Any = src_lang if src_lang is not None else '''en_XX'''
__UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(self._src_lang)
__UpperCAmelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def A( self):
return self._src_lang
@src_lang.setter
def A( self , lowercase__):
__UpperCAmelCase : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def A( self , lowercase__ , lowercase__ = None):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
__UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
__UpperCAmelCase : Optional[Any] = src_lang
__UpperCAmelCase : Union[str, Any] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__)
__UpperCAmelCase : int = self.convert_tokens_to_ids(lowercase__)
__UpperCAmelCase : Tuple = tgt_lang_id
return inputs
def A( self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
__UpperCAmelCase : Any = src_lang
__UpperCAmelCase : Tuple = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__)
def A( self):
return self.set_src_lang_special_tokens(self.src_lang)
def A( self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def A( self , lowercase__):
__UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(lowercase__)
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
__UpperCAmelCase : int = self.convert_ids_to_tokens(self.prefix_tokens)
__UpperCAmelCase : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
__UpperCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def A( self , lowercase__):
__UpperCAmelCase : List[str] = self.convert_tokens_to_ids(lowercase__)
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : List[Any] = [self.eos_token_id, self.cur_lang_code]
__UpperCAmelCase : Tuple = self.convert_ids_to_tokens(self.prefix_tokens)
__UpperCAmelCase : Tuple = self.convert_ids_to_tokens(self.suffix_tokens)
__UpperCAmelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def A( self , lowercase__ , lowercase__ = None):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(lowercase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
__UpperCAmelCase : List[Any] = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase__):
copyfile(self.vocab_file , lowercase__)
return (out_vocab_file,)
| 462 | 0 |
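# Usage sketch for the fast MBart tokenizer above: the language codes choose the
# special tokens, and per set_src_lang_special_tokens the source sequence is suffixed
# with </s> followed by the src_lang code.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# batch["input_ids"] ends with the </s> token followed by the en_XX language id.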
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 715 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight")
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias")
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight")
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias")
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight")
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias")
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
| 350 | 0 |
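# A sketch of driving the adaptive-softmax layer above: hidden states shaped
# (seq_len, batch, d_proj) plus integer targets yield per-token log-probabilities over
# the full vocabulary, and the cross-entropy is registered via add_loss. All sizes
# below are illustrative.
import tensorflow as tf

layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
hidden = tf.random.normal((12, 2, 64))
target = tf.random.uniform((12, 2), maxval=1000, dtype=tf.int32)
log_probs = layer(hidden, target, return_mean=True, training=False)  # (12, 2, 1000)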
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 223 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 223 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys
    sys.exit(bad_files)
| 708 |
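# The same four checks, illustrated on a toy list of repository paths (hypothetical
# data, independent of good_file_paths; the os.sep check assumes a POSIX separator):
import os

paths = ["maths/gcd.py", "Maths/lcm.py", "data structures/heap.py", "loose_file.py"]
print([p for p in paths if p != p.lower()])   # uppercase -> ['Maths/lcm.py']
print([p for p in paths if " " in p])         # spaces    -> ['data structures/heap.py']
print([p for p in paths if "-" in p])         # hyphens   -> []
print([p for p in paths if os.sep not in p])  # no dir    -> ['loose_file.py']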
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""MobileViTFeatureExtractor"""]
__UpperCAmelCase = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218 | 0 |
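# Effect of the lazy module above, sketched: importing the package is cheap because
# each submodule is loaded only when one of its attributes is first accessed.
import transformers.models.mobilevit as mobilevit  # no torch/TF modules imported yet

model_cls = mobilevit.MobileViTForImageClassification  # triggers the real import lazily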
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : Optional[int] = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 69 |
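# Usage sketch for the processor above, which follows the BLIP-style defaults
# (384x384 bicubic resize, CLIP mean/std normalization, RGB conversion); the checkpoint
# id is illustrative and resolves to transformers' BlipImageProcessor.
from PIL import Image
from transformers import BlipImageProcessor

processor = BlipImageProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
pixel_values = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 384, 384])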
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division)
        return {"recall": float(score) if score.size == 1 else score}
| 35 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203 |
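# Quick demonstration of the recursive bubble sort above: each pass bubbles the
# largest remaining element toward the end, and recursion stops once a pass makes
# no swaps.
print(bubble_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]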
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Tuple=13 , lowerCamelCase : Tuple=32 , lowerCamelCase : Any=2 , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : Tuple=16 , lowerCamelCase : Any=[32, 64, 128] , lowerCamelCase : str=[1, 2, 1] , lowerCamelCase : Union[str, Any]=[2, 2, 4] , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Optional[int]=2.0 , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Any=False , lowerCamelCase : Any=True , lowerCamelCase : str=0.02 , lowerCamelCase : Tuple=1E-5 , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[str]=True , lowerCamelCase : Union[str, Any]=10 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=["stage1", "stage2"] , lowerCamelCase : Optional[int]=[1, 2] , ) -> Optional[int]:
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : Optional[int] = image_size
__snake_case : int = patch_size
__snake_case : Any = num_channels
__snake_case : List[Any] = embed_dim
__snake_case : str = hidden_sizes
__snake_case : int = depths
__snake_case : Any = num_heads
__snake_case : Any = window_size
__snake_case : Optional[Any] = mlp_ratio
__snake_case : Any = qkv_bias
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Tuple = drop_path_rate
__snake_case : Tuple = hidden_act
__snake_case : Optional[int] = use_absolute_embeddings
__snake_case : Optional[int] = patch_norm
__snake_case : int = layer_norm_eps
__snake_case : Any = initializer_range
__snake_case : Any = is_training
__snake_case : Tuple = scope
__snake_case : Tuple = use_labels
__snake_case : Any = type_sequence_label_size
__snake_case : Dict = encoder_stride
__snake_case : Union[str, Any] = out_features
__snake_case : Union[str, Any] = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __snake_case ( self : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : str ) -> str:
__snake_case : Tuple = FocalNetModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[str] = model(lowerCamelCase )
__snake_case : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__snake_case : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __snake_case ( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : List[Any] ) -> Optional[int]:
__snake_case : Union[str, Any] = FocalNetBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : int = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__snake_case : Dict = None
__snake_case : Any = FocalNetBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[int] = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any ) -> Optional[int]:
__snake_case : List[Any] = FocalNetForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[str] = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__snake_case : List[str] = 1
__snake_case : Any = FocalNetForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : int = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __snake_case ( self : str , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : str ) -> Optional[Any]:
__snake_case : Tuple = self.type_sequence_label_size
__snake_case : List[Any] = FocalNetForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : Optional[int] = 1
__snake_case : str = FocalNetForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Optional[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self : Optional[int] ) -> Optional[int]:
__snake_case : str = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : List[str] = config_and_inputs
__snake_case : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Optional[int] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : str = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : List[str] = False
def __snake_case ( self : List[str] ) -> Tuple:
__snake_case : Optional[Any] = FocalNetModelTester(self )
__snake_case : int = ConfigTester(self , config_class=lowerCamelCase , embed_dim=37 , has_text_modality=lowerCamelCase )
def __snake_case ( self : Optional[int] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : str ) -> Tuple:
return
def __snake_case ( self : Union[str, Any] ) -> str:
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : str ) -> int:
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
def __snake_case ( self : List[str] ) -> Optional[int]:
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def __snake_case ( self : str ) -> List[str]:
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def __snake_case ( self : Dict ) -> List[str]:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def __snake_case ( self : str ) -> int:
pass
def __snake_case ( self : Optional[int] ) -> Tuple:
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : Dict = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def __snake_case ( self : List[str] ) -> Optional[int]:
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : List[str] = model_class(lowerCamelCase )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[Any] = [*signature.parameters.keys()]
__snake_case : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __snake_case ( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] ) -> str:
__snake_case : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : List[str] = outputs.hidden_states
__snake_case : Tuple = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# FocalNet has a different seq_length
__snake_case : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__snake_case : List[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = reshaped_hidden_states[0].shape
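# reshaped_hidden_states come back channels-first as (batch, channels, height, width);
# flattening the spatial dims and permuting to (batch, height*width, channels) lets the
# same (num_patches, embed_dim) shape check as above apply.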
__snake_case : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase , lowerCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __snake_case ( self : Tuple ) -> int:
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__snake_case : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Dict = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : Optional[int] ) -> Any:
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = 3
__snake_case : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__snake_case : List[str] = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )
@slow
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = FocalNetModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def __snake_case ( self : Tuple ) -> List[Any]:
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = _config_zero_init(lowerCamelCase )
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class a (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : Union[str, Any] ) -> List[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def __snake_case ( self : int ) -> Optional[int]:
__snake_case : List[str] = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(lowerCamelCase )
__snake_case : str = self.default_image_processor
__snake_case : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__snake_case : Optional[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**lowerCamelCase )
# verify the logits
__snake_case : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : Optional[int] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )  # assertEqual, not assertTrue: assertTrue would treat 281 as the failure message and pass for any truthy index
@require_torch
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (FocalNetBackbone,) if is_torch_available() else ()
__UpperCAmelCase : Dict = FocalNetConfig
__UpperCAmelCase : Any = False
def __snake_case ( self : str ) -> List[str]:
__snake_case : Tuple = FocalNetModelTester(self )
| 203 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a : Tuple = 0
_a : str = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a : List[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a : Any = tuple[int, int]
class _lowercase :
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Node | None , ) -> None:
__snake_case = pos_x
__snake_case = pos_y
__snake_case = (pos_y, pos_x)
__snake_case = goal_x
__snake_case = goal_y
__snake_case = g_cost
__snake_case = parent
__snake_case = self.calculate_heuristic()
__snake_case = self.g_cost + self.h_cost
def a ( self : Optional[int] ) -> float:
__snake_case = self.pos_x - self.goal_x
__snake_case = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(SCREAMING_SNAKE_CASE_ ) + abs(SCREAMING_SNAKE_CASE_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Node ) -> bool:
return self.f_cost < other.f_cost
class _lowercase :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : TPosition , SCREAMING_SNAKE_CASE_ : TPosition ) -> List[str]:
__snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE_ )
__snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , SCREAMING_SNAKE_CASE_ )
__snake_case = [self.start]
__snake_case = []
__snake_case = False
def a ( self : int ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__snake_case = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(SCREAMING_SNAKE_CASE_ )
self.closed_nodes.append(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_successors(SCREAMING_SNAKE_CASE_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(SCREAMING_SNAKE_CASE_ )
else:
# retrieve the best current path
__snake_case = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(SCREAMING_SNAKE_CASE_ )
else:
self.open_nodes.append(SCREAMING_SNAKE_CASE_ )
return [self.start.pos]
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Node ) -> list[Node]:
__snake_case = []
for action in delta:
__snake_case = parent.pos_x + action[1]
__snake_case = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE_ , ) )
return successors
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Node | None ) -> list[TPosition]:
__snake_case = node
__snake_case = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__snake_case = current_node.parent
path.reverse()
return path
class _lowercase :
def __init__( self : int , SCREAMING_SNAKE_CASE_ : TPosition , SCREAMING_SNAKE_CASE_ : TPosition ) -> None:
__snake_case = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = False
def a ( self : Optional[int] ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__snake_case = self.fwd_astar.open_nodes.pop(0 )
__snake_case = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ )
self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ )
__snake_case = current_bwd_node
__snake_case = current_fwd_node
__snake_case = {
self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ),
self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(SCREAMING_SNAKE_CASE_ )
else:
# retrieve the best current path
__snake_case = astar.open_nodes.pop(
astar.open_nodes.index(SCREAMING_SNAKE_CASE_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(SCREAMING_SNAKE_CASE_ )
else:
astar.open_nodes.append(SCREAMING_SNAKE_CASE_ )
return [self.fwd_astar.start.pos]
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : Node , SCREAMING_SNAKE_CASE_ : Node ) -> list[TPosition]:
__snake_case = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ )
__snake_case = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ )
bwd_path.pop()
bwd_path.reverse()
__snake_case = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a : Any = (0, 0)
_a : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a : Union[str, Any] = time.time()
_a : Dict = AStar(init, goal)
_a : Optional[int] = a_star.search()
_a : int = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_a : Tuple = time.time()
_a : Union[str, Any] = BidirectionalAStar(init, goal)
_a : List[str] = bidir_astar.search()  # assumed name and call: without running search(), the timing below would only measure construction
_a : Optional[int] = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
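# Illustrative sketch appended for clarity (not in the original module): how the
# two heuristics selected by HEURISTIC differ on one displacement. The sample
# values are hypothetical; `sqrt` is the math.sqrt imported at the top of the file.
demo_dx, demo_dy = 3, 4
print("manhattan:", abs(demo_dx) + abs(demo_dy))    # 7,   used when HEURISTIC == 1
print("euclidean:", sqrt(demo_dx**2 + demo_dy**2))  # 5.0, used when HEURISTIC == 0
# On this 4-connected grid (see `delta`), Manhattan distance is admissible and
# dominates Euclidean, so the A* above typically expands fewer nodes with HEURISTIC == 1.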
| 56 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
lowerCAmelCase_ = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : Optional[Any] = GPTaTokenizer
def __init__( self : Optional[Any] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , _A : Optional[int]="<|endoftext|>" , _A : List[Any]="<|endoftext|>" , _A : Union[str, Any]="<|endoftext|>" , _A : Any=False , **_A : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
_A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , add_prefix_space=_A , **_A , )
lowercase : List[str] = kwargs.pop('''add_bos_token''' , _A )
lowercase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
lowercase : Optional[int] = getattr(_A , pre_tok_state.pop('''type''' ) )
lowercase : List[str] = add_prefix_space
lowercase : List[Any] = pre_tok_class(**_A )
lowercase : Dict = add_prefix_space
def __a ( self : List[Any] , *_A : Optional[Any] , **_A : Any ) -> BatchEncoding:
"""simple docstring"""
lowercase : List[str] = kwargs.get('''is_split_into_words''' , _A )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_A , **_A )
def __a ( self : Dict , *_A : List[str] , **_A : Dict ) -> BatchEncoding:
"""simple docstring"""
lowercase : Any = kwargs.get('''is_split_into_words''' , _A )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_A , **_A )
def __a ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase : Union[str, Any] = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def __a ( self : Dict , _A : "Conversation" ) -> List[int]:
"""simple docstring"""
lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
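# keep only the most recent tokens when the running conversation overflows the context window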
lowercase : int = input_ids[-self.model_max_length :]
return input_ids
| 217 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self : int , _a : int , _a : Any=3 , _a : List[Any]=32 , _a : Tuple=3 , _a : Any=10 , _a : str=[10, 20, 30, 40] , _a : Dict=[1, 1, 2, 1] , _a : List[Any]=True , _a : Union[str, Any]=True , _a : Any="relu" , _a : Union[str, Any]=3 , _a : Optional[int]=None , ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_snake_case )
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __UpperCamelCase ( self : Any , _a : List[str] , _a : List[str] , _a : List[str] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFResNetModel(config=_snake_case )
_SCREAMING_SNAKE_CASE =model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Any , _a : Optional[int] , _a : Tuple , _a : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =TFResNetForImageClassification(_snake_case )
_SCREAMING_SNAKE_CASE =model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class A__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFResNetModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_snake_case )
_SCREAMING_SNAKE_CASE =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(_a : Tuple , _a : int , _a : Optional[Any] ):
_SCREAMING_SNAKE_CASE =model_class(_snake_case )
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_snake_case , _snake_case ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''basic''', '''bottleneck''']
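# exercise both ResNet block variants ("basic" and "bottleneck") in the hidden-state checks below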
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_snake_case , return_tensors='''tf''' )
# forward pass
_SCREAMING_SNAKE_CASE =model(**_snake_case )
# verify the logits
_SCREAMING_SNAKE_CASE =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
_SCREAMING_SNAKE_CASE =tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1E-4 ) )
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "lilt"
def __init__( self : int , _a : Dict=3_0522 , _a : str=768 , _a : Optional[int]=12 , _a : str=12 , _a : List[str]=3072 , _a : Any="gelu" , _a : int=0.1 , _a : Any=0.1 , _a : Union[str, Any]=512 , _a : str=2 , _a : List[Any]=0.02 , _a : str=1E-12 , _a : str=0 , _a : str="absolute" , _a : Any=None , _a : Dict=4 , _a : int=1024 , **_a : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_a , **_a )
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =position_embedding_type
_SCREAMING_SNAKE_CASE =classifier_dropout
_SCREAMING_SNAKE_CASE =channel_shrink_ratio
_SCREAMING_SNAKE_CASE =max_ad_position_embeddings
| 191 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a : str = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__lowercase : Tuple , **__lowercase : str ) -> None:
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = 'openai-gpt'
a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __lowercase : Tuple=40478 , __lowercase : Tuple=512 , __lowercase : int=768 , __lowercase : Dict=12 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[Any]="gelu" , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=1e-5 , __lowercase : Any=0.02 , __lowercase : List[str]="cls_index" , __lowercase : str=True , __lowercase : Dict=None , __lowercase : str=True , __lowercase : List[str]=0.1 , **__lowercase : List[Any] , ) -> List[Any]:
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[Any] = n_positions
__UpperCAmelCase : Optional[int] = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Any = n_head
__UpperCAmelCase : Tuple = afn
__UpperCAmelCase : Any = resid_pdrop
__UpperCAmelCase : Union[str, Any] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : str = layer_norm_epsilon
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = summary_type
__UpperCAmelCase : Optional[Any] = summary_use_proj
__UpperCAmelCase : List[Any] = summary_activation
__UpperCAmelCase : Union[str, Any] = summary_first_dropout
__UpperCAmelCase : Dict = summary_proj_to_labels
super().__init__(**__lowercase )
| 63 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCamelCase (unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=18 , lowercase__=30 , lowercase__=400 , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=[0.5, 0.5, 0.5] , lowercase__=[0.5, 0.5, 0.5] , ) -> List[Any]:
"""simple docstring"""
_snake_case : int = size if size is not None else {'''shortest_edge''': 18}
_snake_case : Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_snake_case : Union[str, Any] = parent
_snake_case : List[str] = batch_size
_snake_case : Optional[Any] = num_channels
_snake_case : Any = image_size
_snake_case : Union[str, Any] = min_resolution
_snake_case : Optional[Any] = max_resolution
_snake_case : Any = do_resize
_snake_case : Union[str, Any] = size
_snake_case : Optional[int] = do_center_crop
_snake_case : Tuple = crop_size
_snake_case : List[Any] = do_normalize
_snake_case : Dict = image_mean
_snake_case : str = image_std
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase (a__ , unittest.TestCase ):
_lowercase : Tuple = LevitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : int = LevitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowercase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowercase__ , '''size''' ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image )
# Test not batched input
_snake_case : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case : Any = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , np.ndarray )
# Test not batched input
_snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case : List[str] = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor )
# Test not batched input
_snake_case : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case : Dict = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 47 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : Any = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : Optional[Any] = {
'gpt-neox-20b': 2_0_4_8,
}
class lowerCamelCase (a__ ):
_lowercase : Optional[int] = VOCAB_FILES_NAMES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
_snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
_snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
_snake_case : int = add_prefix_space
_snake_case : Optional[Any] = pre_tok_class(**lowercase__ )
_snake_case : List[str] = add_prefix_space
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
"""simple docstring"""
_snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
"""simple docstring"""
_snake_case : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
_snake_case : Dict = input_ids[-self.model_max_length :]
return input_ids
| 47 | 1 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
@staticmethod
def __SCREAMING_SNAKE_CASE ( *snake_case__ , **snake_case__ ):
"""simple docstring"""
pass
def _lowerCAmelCase ( lowerCamelCase__ : Image ) -> str:
_SCREAMING_SNAKE_CASE : List[str] = hashlib.md5(image.tobytes() )  # hashlib has no "mda"; md5 gives a stable fingerprint of the raw pixel bytes
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase ( unittest.TestCase ):
A__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = DepthEstimationPipeline(model=snake_case__ , image_processor=snake_case__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , snake_case__ )
import datasets
_SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_SCREAMING_SNAKE_CASE : str = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , snake_case__ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@slow
@require_torch
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = "Intel/dpt-large"
_SCREAMING_SNAKE_CASE : Dict = pipeline("depth-estimation" , model=snake_case__ )
_SCREAMING_SNAKE_CASE : List[Any] = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
_SCREAMING_SNAKE_CASE : str = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 572 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ : List[str] = logging.get_logger(__name__)
lowercase_ : List[Any] = {'''vocab_file''': '''spiece.model'''}
lowercase_ : Optional[int] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowercase_ : Union[str, Any] = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ["""input_ids""", """attention_mask"""]
A__ = []
def __init__( self , snake_case__ , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
_SCREAMING_SNAKE_CASE : Dict = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
_SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
_SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
_SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
_SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
# The mask token behaves like a normal word, i.e. it includes the space before it
_SCREAMING_SNAKE_CASE : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_SCREAMING_SNAKE_CASE : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sep_token=snake_case__ , mask_token=snake_case__ , cls_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
_SCREAMING_SNAKE_CASE : Any = vocab_file
_SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
@property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.__dict__.copy()
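# the SentencePiece processor wraps a C++ object that cannot be pickled, so it
# is dropped from the copied state here and reloaded from vocab_file in __setstate__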
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE : Any = {}
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.sp_model.piece_to_id(snake_case__ )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.sp_model.IdToPiece(snake_case__ )
return token
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = []
_SCREAMING_SNAKE_CASE : Tuple = ""
_SCREAMING_SNAKE_CASE : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = []
else:
current_sub_tokens.append(snake_case__ )
_SCREAMING_SNAKE_CASE : int = False
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = False , snake_case__ = None , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("use_source_tokenizer" , snake_case__ )
_SCREAMING_SNAKE_CASE : int = self.convert_ids_to_tokens(snake_case__ , skip_special_tokens=snake_case__ )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case__ ) )
_SCREAMING_SNAKE_CASE : Any = []
sub_texts.append(snake_case__ )
else:
current_sub_text.append(snake_case__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
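# e.g. ["hello", "[MASK]"] joins to "hello [MASK]"; the sub below then drops the space before the special token, giving "hello[MASK]"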
_SCREAMING_SNAKE_CASE : Optional[int] = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(snake_case__ ) )
else:
_SCREAMING_SNAKE_CASE : Dict = "".join(snake_case__ )
_SCREAMING_SNAKE_CASE : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_SCREAMING_SNAKE_CASE : Optional[int] = self.clean_up_tokenization(snake_case__ )
return clean_text
else:
return text
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
_SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 572 | 1 |
from __future__ import annotations
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowercase , __lowercase = array[indexa], array[indexa]
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if length > 1:
__lowercase = int(length / 2 )
for i in range(lowerCamelCase , low + middle ):
comp_and_swap(lowerCamelCase , lowerCamelCase , i + middle , lowerCamelCase )
bitonic_merge(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
bitonic_merge(lowerCamelCase , low + middle , lowerCamelCase , lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if length > 1:
__lowercase = int(length / 2 )
bitonic_sort(lowerCamelCase , lowerCamelCase , lowerCamelCase , 1 )
bitonic_sort(lowerCamelCase , low + middle , lowerCamelCase , 0 )
bitonic_merge(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : Any = input("""Enter numbers separated by a comma:\n""").strip()
__UpperCamelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
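# Added caveat and usage sketch (not in the original file): bitonic sort only
# yields a fully sorted array when the input length is a power of two. This
# assumes the functions behave as in the upstream, un-mangled implementation:
example = [12, 42, -21, 1]
assert len(example) & (len(example) - 1) == 0, "length must be a power of two"
bitonic_sort(example, 0, len(example), 1)  # direction 1 == ascending
assert example == [-21, 1, 12, 42]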
| 53 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53 | 1 |
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Dict = []
snake_case_ : int = set({'''(''', '''[''', '''{'''} )
snake_case_ : Dict = set({''')''', ''']''', '''}'''} )
snake_case_ : Dict = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(_UpperCamelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_UpperCamelCase ) == 0 or (len(_UpperCamelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_UpperCamelCase ) == 0
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 60 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    # take items by descending key (e.g. value/weight ratio) while the budget allows
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
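# A short usage sketch (hypothetical menu, assuming the helpers above):
#
#     food = ["Burger", "Pizza", "Coca Cola"]
#     value = [80, 100, 60]
#     weight = [40, 60, 40]
#     foods = build_menu(food, value, weight)
#     # take items by best value/weight ratio until the budget (100) is spent
#     print(greedy(foods, 100, Things.value_weight))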
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 119 | 0 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
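# Quick checks (illustrative only):
#
#     assert is_isogram("Uncopyrightable") is True   # no repeated letters
#     assert is_isogram("allowance") is False        # 'a' and 'l' repeat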
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 708 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # split the sklearn Bunch into (features, target)
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
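# For reference, a fitted classifier can then score unseen rows, e.g.
# (sketch, assuming `clf = xgboost(x_train, y_train)` as above):
#
#     predictions = clf.predict(x_test)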
def main() -> None:
    """Fit an XGBoost classifier on the iris dataset and plot its confusion matrix."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 445 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
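# A minimal instantiation sketch (hypothetical hyperparameters):
#
#     config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
#     assert config.d_model == 64  # architecture defaults are kept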
| 461 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 461 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42_384,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
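# A minimal instantiation sketch (the defaults mirror the microsoft/biogpt checkpoint):
#
#     configuration = BioGptConfig()
#     assert configuration.vocab_size == 42_384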
| 647 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 647 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__A , __A , __A = False, False, False
@dataclass
class Audio:
    """Audio feature: extracts audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
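# Usage sketch (hypothetical dataset column and file path):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#     ds = ds.cast_column("audio", Audio(sampling_rate=16_000))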
| 59 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
    'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
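# An illustrative transformation (a sketch mirroring the docstring comment above):
#
#     rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#     # => {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}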
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 407 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
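# Expected result for the default two-digit search (Project Euler problem 33,
# whose answer is the denominator of the product in lowest terms):
#
#     >>> solution()
#     100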
if __name__ == "__main__":
    print(solution())
| 17 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 17 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/bart-base''': 1024,
    '''facebook/bart-large''': 1024,
    '''facebook/bart-large-mnli''': 1024,
    '''facebook/bart-large-cnn''': 1024,
    '''facebook/bart-large-xsum''': 1024,
    '''yjernite/bart_eli5''': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 157 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        # Turn the raw waveform into Whisper input features.
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
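

# Usage sketch (illustrative): PipelineTool loads the processor and model lazily on
# the first call; `audio` is a raw waveform array (Whisper expects 16 kHz mono).
#
#   tool = SpeechToTextTool()
#   text = tool(audio)  # -> transcribed string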
| 103 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator that runs the model's accelerate `_hf_hook.pre_forward` (when present)
    before `method`, so offloaded weights are moved onto the execution device first."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
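

# Usage sketch (hypothetical module): decorate any method that touches offloaded
# weights, not just `forward`, so accelerate's hook runs first.
#
#   class TinyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return self.encoder(x)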
| 622 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
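

# Note: `gather_for_metrics` gathers tensors across processes *and* drops the
# duplicate samples that distributed samplers add to pad the last batch, so the
# concatenated predictions line up 1:1 with the evaluation dataset.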
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 622 | 1 |
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 327 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 327 | 1 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of vertices reachable from `start`."""
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
| 714 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj)
assert isinstance(_test_patching.os.path , _PatchedModuleObj)
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj)
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj)
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj)
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj)
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
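

# Usage sketch: `patch_submodule` also works outside tests, e.g. to stub out a
# module attribute for the duration of a block (the stub value is illustrative):
#
#   with patch_submodule(_test_patching, "os.path.join", lambda *parts: "/stubbed"):
#       assert _test_patching.os.path.join("a", "b") == "/stubbed"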
| 155 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects, applied in order to a `scores` tensor."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that restricts sampling to the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token as the last token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a min-length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Whisper-specific [`FlaxLogitsProcessor`] that constrains how timestamp tokens may be sampled."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
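

# Usage sketch: processors compose through FlaxLogitsProcessorList, which applies
# each one in order at every decoding step (token ids and lengths are illustrative):
#
#   processors = FlaxLogitsProcessorList()
#   processors.append(FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2))
#   processors.append(FlaxTemperatureLogitsWarper(0.7))
#   scores = processors(input_ids, scores, cur_len)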
| 65 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
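

# Instantiating the deprecated class still works but emits a FutureWarning, e.g.:
#
#   feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-small")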
| 210 | 0 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that are both an n-digit number and an n-th power (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
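

# Worked example: 16807 = 7**5 is itself a 5-digit number, so it counts;
# no power of 10 can ever qualify, since 10**n always has n + 1 digits.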
if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 396 |
"""simple docstring"""
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return all start indices at which `pattern` occurs in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
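

# Example: in "ABAAABCDBBABCDDEBCABC" the pattern "ABC" starts at indices 4, 10
# and 18, which is what the demo below prints. Worst-case cost is
# O(len(s) * len(pattern)) character comparisons.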
if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 396 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
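

# Worked example: 2 mol at 100 K in a 5 m^3 vessel:
#
#   pressure_of_gas_system(2, 100, 5)  # 2 * 100 * 8.314462 / 5 = 332.57848 Pa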
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 106 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 543 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def str2bool(v):
    """Parse boolean arguments from the command line."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map an OpenAI-style ResBlock onto diffusers' ResnetBlock2D parameter names.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Split the fused qkv projection and squeeze the 1x1-conv dims into linear weights.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()

    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
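
# Example invocation (hypothetical local checkpoint path):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./converted-consistency-model \
#       --class_cond True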
| 706 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter <= max_perimeter, how many integer right
    triangles (base, perpendicular, hypotenuse) have that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter p <= n with the most integer right-triangle solutions (Project Euler 39)."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions")
| 583 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : int = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ : Tuple = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__magic_name__ : Dict = {'''allegro/herbert-base-cased''': 514}
__magic_name__ : List[Any] = {}
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = HerbertTokenizer
def __init__( self : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : str="<s>" , _SCREAMING_SNAKE_CASE : str="<unk>" , _SCREAMING_SNAKE_CASE : Tuple="<pad>" , _SCREAMING_SNAKE_CASE : Optional[Any]="<mask>" , _SCREAMING_SNAKE_CASE : Dict="</s>" , **_SCREAMING_SNAKE_CASE : Dict , ):
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] = None ):
"""simple docstring"""
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] = None , _SCREAMING_SNAKE_CASE : Dict = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str = None ):
"""simple docstring"""
UpperCamelCase = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
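
# Usage sketch (added; illustrative only, the upstream class is HerbertTokenizerFast):
# tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# single sequence: <s> A </s>         -> tokenizer("Dzien dobry")["input_ids"]
# sequence pair:   <s> A </s> B </s>  -> tokenizer("Dzien", "dobry")["input_ids"]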
| 280 |
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowercase_ = True
from torch.cuda.amp import autocast
lowercase_ = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    """Declare a dataclass field whose default is a (mutable) list value."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class __a :
lowerCamelCase : str =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase : Optional[str] =field(
default=__snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase : Optional[bool] =field(
default=__snake_case , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowerCamelCase : Optional[float] =field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
lowerCamelCase : Optional[float] =field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
lowerCamelCase : Optional[float] =field(
default=0.1 , metadata={
'help': 'The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
lowerCamelCase : Optional[float] =field(
default=0.1 , metadata={'help': 'The dropout probabilitiy for all 1D convolutional layers in feature extractor.'} , )
lowerCamelCase : Optional[float] =field(
default=0.05 , metadata={
'help': (
'Propability of each feature vector along the time axis to be chosen as the start of the vector'
'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'
'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
lowerCamelCase : Optional[float] =field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class __a :
lowerCamelCase : Optional[str] =field(
default=__snake_case , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase : Optional[str] =field(
default='train+validation' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase : bool =field(
default=__snake_case , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCamelCase : Optional[int] =field(
default=__snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase : Optional[int] =field(
default=__snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase : Optional[int] =field(
default=__snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
lowerCamelCase : List[str] =list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class __a :
lowerCamelCase : WavaVecaProcessor
lowerCamelCase : Union[bool, str] =True
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[int] =None
def __call__( self , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = [{'''input_values''': feature['''input_values''']} for feature in features]
lowerCAmelCase_ = [{'''input_ids''': feature['''labels''']} for feature in features]
lowerCAmelCase_ = self.processor.pad(
UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
lowerCAmelCase_ = self.processor.pad(
labels=UpperCAmelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
lowerCAmelCase_ = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
lowerCAmelCase_ = labels
return batch
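
# Illustration (added): `masked_fill(attention_mask.ne(1), -100)` above swaps
# padded label positions for -100, the index the CTC loss ignores, e.g.
#     labels         = [[5, 7, 0], [9, 0, 0]]
#     attention_mask = [[1, 1, 0], [1, 0, 0]]
#     result         = [[5, 7, -100], [9, -100, -100]]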
class __a ( __snake_case ):
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
model.train()
lowerCAmelCase_ = self._prepare_inputs(UpperCAmelCase )
if self.use_amp:
with autocast():
lowerCAmelCase_ = self.compute_loss(UpperCAmelCase , UpperCAmelCase )
else:
lowerCAmelCase_ = self.compute_loss(UpperCAmelCase , UpperCAmelCase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
lowerCAmelCase_ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCAmelCase_ = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
lowerCAmelCase_ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCAmelCase ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCAmelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCAmelCase )
else:
loss.backward()
return loss.detach()
def main():
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCAmelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
lowerCAmelCase_ = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
lowerCAmelCase_ = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
lowerCAmelCase_ = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(_lowercase : List[str] ):
lowerCAmelCase_ = re.sub(_lowercase , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
lowerCAmelCase_ = train_dataset.map(_lowercase , remove_columns=['''sentence'''] )
lowerCAmelCase_ = eval_dataset.map(_lowercase , remove_columns=['''sentence'''] )
def extract_all_chars(_lowercase : str ):
lowerCAmelCase_ = ''' '''.join(batch['''text'''] )
lowerCAmelCase_ = list(set(_lowercase ) )
return {"vocab": [vocab], "all_text": [all_text]}
lowerCAmelCase_ = train_dataset.map(
_lowercase , batched=_lowercase , batch_size=-1 , keep_in_memory=_lowercase , remove_columns=train_dataset.column_names , )
lowerCAmelCase_ = train_dataset.map(
_lowercase , batched=_lowercase , batch_size=-1 , keep_in_memory=_lowercase , remove_columns=eval_dataset.column_names , )
lowerCAmelCase_ = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
lowerCAmelCase_ = {v: k for k, v in enumerate(_lowercase )}
lowerCAmelCase_ = vocab_dict[''' ''']
del vocab_dict[" "]
lowerCAmelCase_ = len(_lowercase )
lowerCAmelCase_ = len(_lowercase )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(_lowercase , _lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
lowerCAmelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=_lowercase , return_attention_mask=_lowercase )
lowerCAmelCase_ = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
lowerCAmelCase_ = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
lowerCAmelCase_ = min(len(_lowercase ) , data_args.max_train_samples )
lowerCAmelCase_ = train_dataset.select(range(_lowercase ) )
if data_args.max_val_samples is not None:
lowerCAmelCase_ = eval_dataset.select(range(data_args.max_val_samples ) )
lowerCAmelCase_ = torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_lowercase : Union[str, Any] ):
lowerCAmelCase_ , lowerCAmelCase_ = torchaudio.load(batch['''path'''] )
lowerCAmelCase_ = resampler(_lowercase ).squeeze().numpy()
lowerCAmelCase_ = 1_6_0_0_0
lowerCAmelCase_ = batch['''text''']
return batch
lowerCAmelCase_ = train_dataset.map(
_lowercase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
lowerCAmelCase_ = eval_dataset.map(
_lowercase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_lowercase : str ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
lowerCAmelCase_ = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(_lowercase )
return batch
lowerCAmelCase_ = train_dataset.map(
_lowercase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , )
lowerCAmelCase_ = eval_dataset.map(
_lowercase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , )
# Metric
lowerCAmelCase_ = datasets.load_metric('''wer''' )
def compute_metrics(_lowercase : Optional[int] ):
lowerCAmelCase_ = pred.predictions
lowerCAmelCase_ = np.argmax(_lowercase , axis=-1 )
lowerCAmelCase_ = processor.tokenizer.pad_token_id
lowerCAmelCase_ = processor.batch_decode(_lowercase )
# we do not want to group tokens when computing the metrics
lowerCAmelCase_ = processor.batch_decode(pred.label_ids , group_tokens=_lowercase )
lowerCAmelCase_ = wer_metric.compute(predictions=_lowercase , references=_lowercase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
lowerCAmelCase_ = DataCollatorCTCWithPadding(processor=_lowercase , padding=_lowercase )
# Initialize our Trainer
lowerCAmelCase_ = CTCTrainer(
model=_lowercase , data_collator=_lowercase , args=_lowercase , compute_metrics=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCAmelCase_ = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowerCAmelCase_ = model_args.model_name_or_path
else:
lowerCAmelCase_ = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowerCAmelCase_ = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model()
lowerCAmelCase_ = train_result.metrics
lowerCAmelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCAmelCase_ = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
lowerCAmelCase_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase_ = trainer.evaluate()
lowerCAmelCase_ = data_args.max_val_samples if data_args.max_val_samples is not None else len(_lowercase )
lowerCAmelCase_ = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
return results
if __name__ == "__main__":
    main()
| 552 | 0 |
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 691 |
"""Boruvka's algorithm to find the minimum spanning tree of a graph."""


class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct by bumping duplicates upward."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka(graph):
        """Return the minimum spanning tree of ``graph`` via Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
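
# Usage sketch (added; relies on the names restored above): build a small graph,
# force distinct edge weights, then extract the minimum spanning tree.
if __name__ == "__main__":
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[[0, 1, 1], [0, 2, 1], [2, 3, 1]])
    g.distinct_weight()
    print(Graph.boruvka(g))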
| 691 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase = parser.parse_args()
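    # Invocation sketch (added; the paths below are placeholders):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/tf_ckpt \
    #       --rembert_config_file /path/to/rembert_config.json \
    #       --pytorch_dump_path /path/to/pytorch_model.bin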
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 137 |
def get_set_bits_count(number):
    """Count the set bits (1s) in ``number`` using Brian Kernighan's trick."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop runs only as
        # many times as there are `1` bits, not 32 times.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
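    # Worked example (added): 25 = 0b11001 has three set bits; each pass clears
    # the lowest set bit: 25 -> 24 -> 16 -> 0.
    assert get_set_bits_count(25) == 3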
    doctest.testmod()
| 137 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class a__( snake_case__ ):
a_ : Tuple = '''encodec'''
def __init__( self , _UpperCAmelCase=[1.5, 3.0, 6.0, 12.0, 24.0] , _UpperCAmelCase=2_4000 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=128 , _UpperCAmelCase=32 , _UpperCAmelCase=1 , _UpperCAmelCase=[8, 5, 4, 2] , _UpperCAmelCase="weight_norm" , _UpperCAmelCase=7 , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase="reflect" , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=1.0 , _UpperCAmelCase=1024 , _UpperCAmelCase=None , _UpperCAmelCase=True , **_UpperCAmelCase , ) -> str:
snake_case__ =target_bandwidths
snake_case__ =sampling_rate
snake_case__ =audio_channels
snake_case__ =normalize
snake_case__ =chunk_length_s
snake_case__ =overlap
snake_case__ =hidden_size
snake_case__ =num_filters
snake_case__ =num_residual_layers
snake_case__ =upsampling_ratios
snake_case__ =norm_type
snake_case__ =kernel_size
snake_case__ =last_kernel_size
snake_case__ =residual_kernel_size
snake_case__ =dilation_growth_rate
snake_case__ =use_causal_conv
snake_case__ =pad_mode
snake_case__ =compress
snake_case__ =num_lstm_layers
snake_case__ =trim_right_ratio
snake_case__ =codebook_size
snake_case__ =codebook_dim if codebook_dim is not None else hidden_size
snake_case__ =use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_UpperCAmelCase )
@property
def _lowercase ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _lowercase ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _lowercase ( self ) -> int:
snake_case__ =np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _lowercase ( self ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
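
# Worked check (added): with the 24 kHz defaults above, upsampling_ratios
# (8, 5, 4, 2) give a hop length of 8 * 5 * 4 * 2 = 320 samples, so
# frame_rate = ceil(24000 / 320) = 75 and
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.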
| 581 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
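
# Note (added): the `_LazyModule` indirection above defers importing the heavy
# PyTorch/TensorFlow submodules until an attribute (e.g. `TransfoXLModel`) is
# first accessed, keeping `import transformers` cheap.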
| 581 | 1 |
__a = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
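
# Usage sketch (added): these types describe dataset columns, e.g.
#     Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})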
| 97 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def snake_case_ ( lowercase__ ):
return x + 2
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "x = 3"
UpperCAmelCase__ : List[str] = {}
UpperCAmelCase__ : Tuple = evaluate(snake_case__ , {} , state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ , {"x": 3} )
UpperCAmelCase__ : Any = "x = y"
UpperCAmelCase__ : List[str] = {"y": 5}
UpperCAmelCase__ : Dict = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ , {"x": 5, "y": 5} )
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = "y = add_two(x)"
UpperCAmelCase__ : Tuple = {"x": 3}
UpperCAmelCase__ : Union[str, Any] = evaluate(snake_case__ , {"add_two": add_two} , state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
UpperCAmelCase__ : Any = evaluate(snake_case__ , {} , state=snake_case__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = "x = 3"
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Tuple = evaluate(snake_case__ , {} , state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ , {"x": 3} )
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = "test_dict = {'x': x, 'y': add_two(x)}"
UpperCAmelCase__ : List[Any] = {"x": 3}
UpperCAmelCase__ : int = evaluate(snake_case__ , {"add_two": add_two} , state=snake_case__ )
self.assertDictEqual(snake_case__ , {"x": 3, "y": 5} )
self.assertDictEqual(snake_case__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = "x = 3\ny = 5"
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Union[str, Any] = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ , {"x": 3, "y": 5} )
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = "text = f'This is x: {x}.'"
UpperCAmelCase__ : Dict = {"x": 3}
UpperCAmelCase__ : Dict = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(snake_case__ , {"x": 3, "text": "This is x: 3."} )
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "if x <= 3:\n y = 2\nelse:\n y = 5"
UpperCAmelCase__ : Dict = {"x": 3}
UpperCAmelCase__ : Dict = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(snake_case__ , {"x": 3, "y": 2} )
UpperCAmelCase__ : Optional[Any] = {"x": 8}
UpperCAmelCase__ : str = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ , {"x": 8, "y": 5} )
def UpperCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = "test_list = [x, add_two(x)]"
UpperCAmelCase__ : Any = {"x": 3}
UpperCAmelCase__ : Any = evaluate(snake_case__ , {"add_two": add_two} , state=snake_case__ )
self.assertListEqual(snake_case__ , [3, 5] )
self.assertDictEqual(snake_case__ , {"x": 3, "test_list": [3, 5]} )
def UpperCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : str = "y = x"
UpperCAmelCase__ : str = {"x": 3}
UpperCAmelCase__ : Any = evaluate(snake_case__ , {} , state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ , {"x": 3, "y": 3} )
def UpperCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = "test_list = [x, add_two(x)]\ntest_list[1]"
UpperCAmelCase__ : List[Any] = {"x": 3}
UpperCAmelCase__ : List[Any] = evaluate(snake_case__ , {"add_two": add_two} , state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ , {"x": 3, "test_list": [3, 5]} )
UpperCAmelCase__ : Union[str, Any] = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
UpperCAmelCase__ : str = {"x": 3}
UpperCAmelCase__ : Any = evaluate(snake_case__ , {"add_two": add_two} , state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = "x = 0\nfor i in range(3):\n x = i"
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Any = evaluate(snake_case__ , {"range": range} , state=snake_case__ )
assert result == 2
self.assertDictEqual(snake_case__ , {"x": 2, "i": 2} )
| 199 | 0 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be an int"
            " or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
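
# Worked example (added; verifiable by hand): the sorted rotations of "^BANANA"
# end in "BNN^AAA" with the original string at index 6, and
# reverse_bwt("BNN^AAA", 6) recovers "^BANANA".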
| 702 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
@flax_register_to_config
class lowerCAmelCase ( nn.Module, __UpperCamelCase, __UpperCamelCase ):
UpperCAmelCase__ = 32
UpperCAmelCase__ = 4
UpperCAmelCase__ = 4
UpperCAmelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCAmelCase__ = False
UpperCAmelCase__ = (3_20, 6_40, 12_80, 12_80)
UpperCAmelCase__ = 2
UpperCAmelCase__ = 8
UpperCAmelCase__ = None
UpperCAmelCase__ = 12_80
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = False
UpperCAmelCase__ = jnp.floataa
UpperCAmelCase__ = True
UpperCAmelCase__ = 0
UpperCAmelCase__ = False
def A_ ( self : Tuple , UpperCAmelCase : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
lowerCamelCase__ : int = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase__ : List[str] = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa )
lowerCamelCase__ : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase__ : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = jax.random.split(UpperCAmelCase )
lowerCamelCase__ : Dict = {'params': params_rng, 'dropout': dropout_rng}
return self.init(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["params"]
def A_ ( self : Tuple ) -> Optional[int]:
lowerCamelCase__ : Any = self.block_out_channels
lowerCamelCase__ : int = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase__ : Tuple = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase__ : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase__ : Optional[int] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase__ : int = FlaxTimestepEmbedding(UpperCAmelCase , dtype=self.dtype )
lowerCamelCase__ : Optional[int] = self.only_cross_attention
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : List[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase__ : Dict = output_channel
lowerCamelCase__ : Optional[int] = block_out_channels[i]
lowerCamelCase__ : List[Any] = i == len(UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase__ : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase__ : str = FlaxDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase )
lowerCamelCase__ : List[Any] = down_blocks
# mid
lowerCamelCase__ : Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCamelCase__ : Any = []
lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Any = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : int = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCamelCase__ : str = output_channel
lowerCamelCase__ : int = reversed_block_out_channels[i]
lowerCamelCase__ : int = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase ) - 1 )]
lowerCamelCase__ : Optional[Any] = i == len(UpperCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCamelCase__ : Tuple = FlaxCrossAttnUpBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase__ : Optional[Any] = FlaxUpBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(UpperCAmelCase )
lowerCamelCase__ : Tuple = output_channel
lowerCamelCase__ : Tuple = up_blocks
# out
lowerCamelCase__ : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(UpperCAmelCase , jnp.ndarray ):
lowerCamelCase__ : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[Any] = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase__ : Any = jnp.expand_dims(UpperCAmelCase , 0 )
lowerCamelCase__ : List[str] = self.time_proj(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.time_embedding(UpperCAmelCase )
# 2. pre-process
lowerCamelCase__ : Dict = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) )
lowerCamelCase__ : Optional[Any] = self.conv_in(UpperCAmelCase )
# 3. down
lowerCamelCase__ : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = down_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
else:
lowerCamelCase__ , lowerCamelCase__ : Any = down_block(UpperCAmelCase , UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCamelCase__ : Union[str, Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
UpperCAmelCase , UpperCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase__ : str = new_down_block_res_samples
# 4. mid
lowerCamelCase__ : List[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCamelCase__ : str = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCamelCase__ : List[str] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : List[Any] = up_block(
UpperCAmelCase , temb=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train , )
else:
lowerCamelCase__ : int = up_block(UpperCAmelCase , temb=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train )
# 6. post-process
lowerCamelCase__ : str = self.conv_norm_out(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = nn.silu(UpperCAmelCase )
lowerCamelCase__ : Any = self.conv_out(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = jnp.transpose(UpperCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=UpperCAmelCase )
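
# Note (added): in `__call__` above, the up loop consumes `layers_per_block + 1`
# skip tensors per block from the end of `down_block_res_samples`, mirroring how
# the down pass accumulated them; hence the slice `-(self.layers_per_block + 1)`.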
| 188 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = LongformerTokenizer
__lowerCAmelCase = True
__lowerCAmelCase = LongformerTokenizerFast
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__a : List[str] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__a : Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__a : List[str] = {'''unk_token''': '''<unk>'''}
__a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : List[str] = '''lower newer'''
__a : List[Any] = '''lower newer'''
return input_text, output_text
def _lowerCamelCase ( self ):
__a : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : Tuple = '''lower newer'''
__a : Dict = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__a : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Optional[int] = tokens + [tokenizer.unk_token]
__a : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowerCamelCase ( self ):
__a : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__a : Any = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCAmelCase )
__a : List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCAmelCase )
__a : str = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : Tuple = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__a : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCamelCase ( self ):
__a : Tuple = self.get_tokenizer()
__a : Union[str, Any] = '''Encode this sequence.'''
__a : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__a : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__a : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing spaces after special tokens
__a : Dict = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )} ) # mask token has a left space
__a : Any = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
__a : Optional[Any] = '''Encode <mask> sequence'''
__a : Union[str, Any] = '''Encode <mask>sequence'''
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : int = encoded.index(_UpperCAmelCase )
__a : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = tokenizer.encode(_UpperCAmelCase )
__a : Any = encoded.index(_UpperCAmelCase )
__a : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
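        # Note (added): the mask token is registered with lstrip=True (it absorbs
        # the space to its left), so after "Encode <mask> sequence" the token
        # following the mask still starts with the byte-encoded space, while
        # "Encode <mask>sequence" yields one that does not.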
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Dict = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__a : List[Any] = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__a : Optional[Any] = '''A, <mask> AllenNLP sentence.'''
__a : Tuple = tokenizer_r.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
__a : Union[str, Any] = tokenizer_p.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__a : int = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _lowerCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__a : List[str] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__a : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , _UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Any = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__a : Union[str, Any] = f"""{text_of_1_token} {text_of_1_token}"""
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : List[Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Any = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : str = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Any = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : List[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Union[str, Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : Optional[Any] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__a : int = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Tuple = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ) + 1, 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : List[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : List[Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : List[str] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Union[str, Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , ) | 52 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32

    @property
    def time_input_dim( self ):
        return 32

    @property
    def block_out_channels_0( self ):
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        return 100
    @property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer

    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )

        model_kwargs = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )

        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :16] = 0

        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )

        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}" )

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0

        prompt = 'a hat'

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()

        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image , expected_image )
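

# --- Illustrative sketch (not part of the original test file) ---
# `get_dummy_inputs` above seeds the generator differently per backend because
# `torch.Generator(device="mps")` is not supported; MPS falls back to seeding the
# global CPU generator. A standalone version of that pattern:
def make_generator(device , seed=0 ):
    if str(device ).startswith("mps" ):
        # torch.manual_seed returns the (now seeded) default CPU generator
        return torch.manual_seed(seed )
    return torch.Generator(device=device ).manual_seed(seed )


assert make_generator("cpu" ).initial_seed() == 0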
| 527 | 0 |
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search( self ) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path( self , target_vertex: str ) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )

        return self.shortest_path(target_vertex_parent) + f'->{target_vertex}'
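

# --- Illustrative sketch (not part of the original file) ---
# `breadth_first_search` records each vertex's parent; `shortest_path` walks that map
# back to the source recursively. The same walk can be done iteratively:
def rebuild_path(parent: dict, source: str, target: str) -> list[str]:
    path = [target]
    while path[-1] != source:
        path.append(parent[path[-1]])  # step to the predecessor recorded by BFS
    return path[::-1]


# With the demo graph above and source 'G', BFS sets parent['D'] == 'B',
# parent['B'] == 'A', parent['A'] == 'C', parent['C'] == 'G', so
# rebuild_path(parent, 'G', 'D') == ['G', 'C', 'A', 'B', 'D'].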
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))  # raises ValueError: no path to 'Foo' | 182 |
| 182 | 1 |
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    # Construct the model config, either the default GPT-2 config or one read from a JSON file
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )

    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
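

# --- Illustrative usage (not part of the original script; paths are placeholders) ---
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json   # optional; defaults to GPT2Config()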
| 439 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )

        output , past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
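

# --- Illustrative sketch (not part of the original test) ---
# The decoder mask built above always unmasks position 0 (the decoder-start token)
# and masks padding elsewhere. On a toy batch with pad_token_id = 1:
#
#   decoder_input_ids = tf.constant([[1, 5, 1, 7]])
#   mask = tf.concat(
#       [tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
#        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], 1), tf.int8)],
#       axis=-1,
#   )
#   # mask == [[1, 1, 0, 1]]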
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests( unittest.TestCase ):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer( self ):
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )

    @cached_property
    def model( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_90_generation_from_short_text( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 439 | 1 |
__lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( snake_case , snake_case , snake_case ) -> list[str]:
lowercase__: List[Any] = set()
# keep track of all the paths to be checked
lowercase__: Tuple = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowercase__: Tuple = queue.pop(0 )
# get the last node from the path
lowercase__: Dict = path[-1]
if node not in explored:
lowercase__: int = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowercase__: Optional[int] = list(lowercase_ )
new_path.append(lowercase_ )
queue.append(lowercase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowercase_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( snake_case , snake_case , snake_case ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowercase__: int = [start]
lowercase__: Tuple = set(lowercase_ )
# Keep tab on distances from `start` node.
lowercase__: str = {start: 0, target: -1}
while queue:
lowercase__: Union[str, Any] = queue.pop(0 )
if node == target:
lowercase__: Optional[Any] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowercase_ )
queue.append(lowercase_ )
lowercase__: Tuple = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 707 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ):
        model = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )

        features = {
            'input_ids': tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }

        output = model(features )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] , dtype=tf.float32 , )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 335 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig ):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                F"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")

        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
    @staticmethod
    def expand_attention_types_params( attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
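

# --- Illustrative example (not part of the original file) ---
# The default attention_types spec [[["global", "local"], 12]] expands to one entry
# per layer: 24 items alternating "global" and "local", matching num_layers == 24.
#
#   GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
#   # -> ["global", "local", "global", "local", ...]  (24 items)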
def custom_unfold( input , dimension , size , step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]

    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="floor" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]

    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )

    return sliced.permute(perm )
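

# --- Illustrative example (not part of the original file) ---
# custom_unfold mirrors torch.Tensor.unfold using plain indexing, which keeps the
# graph exportable to ONNX. For a 1-D tensor with size=2, step=2:
#
#   x = torch.arange(6)        # tensor([0, 1, 2, 3, 4, 5])
#   custom_unfold(x, 0, 2, 2)  # tensor([[0, 1], [2, 3], [4, 5]])
#   x.unfold(0, 2, 2)          # identical result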
def custom_get_block_length_and_num_splits( seq_length , window_size ):
    """Find the largest divisor of seq_length below window_size, and the number of blocks."""
    import torch

    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="floor" )
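

# --- Illustrative example (not part of the original file) ---
# For seq_length=8 and window_size=4 the candidates are [1, 2, 3]; 8 is divisible
# by 1 and 2, so the helper returns (2, 4): block length 2 split into 4 blocks.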
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
    @property
    def inputs( self ):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads( self ):
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        common_inputs = super(GPTNeoOnnxConfig , self).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset( self ):
        return 13
| 77 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config( self ):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def create_and_check_model( self , config , pixel_values ):
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )

    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes( self ):
        pass

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )

    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
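

# --- Illustrative sketch (not part of the original test) ---
# The JIT test above compares traced and op-by-op execution; jax.disable_jit()
# forces the latter, which helps catch tracing bugs:
#
#   f = jax.jit(lambda x: jnp.sin(x) * 2)
#   with jax.disable_jit():
#       eager = f(jnp.ones(3))
#   assert jnp.allclose(f(jnp.ones(3)), eager)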
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="np")

        outputs = model(**inputs )

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
| 77 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
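
    # --- Illustrative note (not part of the original file) ---
    # With _LazyModule, `import transformers.models.jukebox` stays cheap: heavy
    # submodules (e.g. modeling_jukebox and its torch dependency) are only imported
    # when an attribute such as JukeboxModel is first accessed, mirroring the
    # TYPE_CHECKING branch above.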
| 143 |
from math import pow
def backtrack(
    needed_sum ,
    power ,
    current_number ,
    current_sum ,
    solutions_count ,
):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solve(needed_sum , power ):
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )

    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
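
    # --- Illustrative example (not part of the original file) ---
    # 100 has three representations as a sum of distinct squares:
    # 10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2.
    print(solve(100, 2))  # 3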
| 143 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple , flax_tensor ):
    """Rename the keys of basic flax layers to match their PyTorch counterparts."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer , checkpoint_info , switch_checkpoint_path ):
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block , save_path ):
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("""/""" , """.""" )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly(switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name: str = WEIGHTS_NAME ):
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        checkpoint_info = flatten_dict(checkpoint_info , sep="""/""" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )

        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split("""/""" ) ) , raw_weights )
        key = """/""".join(key )

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )

    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            """.bin""" , F"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}

    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
        f.write(content )

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )

    tokenizer = T5Tokenizer.from_pretrained("""t5-small""" )
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""

    input_ids = tokenizer(text , return_tensors="""pt""" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
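

# --- Illustrative sketch (not part of the original script) ---
# The sharding loop in `shard_on_the_fly` is a greedy bin-packing: weights accumulate
# in the current shard until the next weight would overflow max_shard_size. The same
# idea reduced to (name, size) pairs:
def greedy_shards(sizes , max_shard_size ):
    shards , current , current_size = [] , [] , 0
    for name, size in sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current )
            current , current_size = [] , 0
        current.append(name )
        current_size += size
    if current:
        shards.append(current )
    return shards


assert greedy_shards([("a", 6), ("b", 5), ("c", 4)] , 10 ) == [["a"], ["b", "c"]]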
| 377 |
# Size of the alphabet, used as the base of the polynomial hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str , text: str ) -> bool:
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue

        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue

        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = """abc1abc12"""
    text_1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text_2 = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern , text_1 ) and not rabin_karp(pattern , text_2 )

    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern , text )

    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern , text )

    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern , text )

    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern , text )
    pattern = """Lue"""
    assert not rabin_karp(pattern , text )

    print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
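
    # --- Illustrative check (not part of the original file) ---
    # Rolling the window "ab" -> "bc" in O(1) matches hashing "bc" from scratch
    # (for p_len == 2, modulus_power == alphabet_size ** 1):
    h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
    h_bc = (ord("b") * alphabet_size + ord("c")) % modulus
    rolled = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    assert rolled == h_bc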
| 377 | 1 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent ):
        self.parent = parent

    def prepare_feat_extract_dict( self ):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>

    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>

    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>'''

    html_string_2 = '''
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    '''

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
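# Usage sketch (comment-only, mirroring the expectations exercised above): calling
# the feature extractor on raw HTML returns parallel lists of text nodes and xpaths,
# e.g. MarkupLMFeatureExtractor()("<html><body><p>hi</p></body></html>")
# yields nodes [["hi"]] and xpaths [["/html/body/p"]].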
| 62 |
def solution(limit: int = 1_000_000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
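# Minimal sketch (added for illustration, not part of the original solution):
# counting the terms of a single Collatz chain; 13 -> 40 -> 20 -> 10 -> 5 -> 16
# -> 8 -> 4 -> 2 -> 1 has 10 terms.
def _collatz_chain_length(n: int) -> int:
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length


assert _collatz_chain_length(13) == 10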
if __name__ == "__main__":
print(solution(int(input().strip())))
| 62 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
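# Standalone usage sketch (comment-only; `unet`, `sample` and `generator` are
# assumed to exist, mirroring the denoising loops exercised above):
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(timesteps=[106, 0])
#   sample = sample * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       residual = unet(scheduler.scale_model_input(sample, t), t)
#       sample = scheduler.step(residual, t, sample, generator=generator).prev_sample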
| 12 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
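# Usage sketch (comment-only, matching the processor dict above): the processor
# resizes and normalizes any PIL/NumPy/torch input to a float tensor of shape
# (batch, channels, 18, 18), e.g.
#   ViTImageProcessor(size={"height": 18, "width": 18})(images, return_tensors="pt").pixel_values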
| 12 | 1 |
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
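    # Sanity sketch (added for illustration): per the Project Euler 7 statement,
    # the 6th prime is 13.
    assert solution(6) == 13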
| 714 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _UpperCamelCase ( __A , __A , __A ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = AutoConfig.from_pretrained(__A )
UpperCamelCase__ = FlaxAutoModelForSeqaSeqLM.from_config(config=__A )
UpperCamelCase__ = checkpoints.load_tax_checkpoint(__A )
UpperCamelCase__ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
UpperCamelCase__ = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCamelCase__ = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
UpperCamelCase__ = F'''layers_{str(__A )}'''
# Self-Attention
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
UpperCamelCase__ = flax_model.params["encoder"]["block"][str(__A )]["layer"]
UpperCamelCase__ = tax_attention_key
UpperCamelCase__ = tax_attention_out
UpperCamelCase__ = tax_attention_query
UpperCamelCase__ = tax_attention_value
UpperCamelCase__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase__ = tax_mlp_wi_a
UpperCamelCase__ = tax_mlp_wi_a
else:
UpperCamelCase__ = tax_mlp_wi
UpperCamelCase__ = tax_mlp_wo
UpperCamelCase__ = tax_mlp_layer_norm
UpperCamelCase__ = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
UpperCamelCase__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
UpperCamelCase__ = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
UpperCamelCase__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
UpperCamelCase__ = F'''layers_{str(__A )}'''
# Self-Attention
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
UpperCamelCase__ = tax_enc_dec_attention_module["key"]["kernel"]
UpperCamelCase__ = tax_enc_dec_attention_module["out"]["kernel"]
UpperCamelCase__ = tax_enc_dec_attention_module["query"]["kernel"]
UpperCamelCase__ = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
UpperCamelCase__ = flax_model.params["decoder"]["block"][str(__A )]["layer"]
UpperCamelCase__ = tax_attention_key
UpperCamelCase__ = tax_attention_out
UpperCamelCase__ = tax_attention_query
UpperCamelCase__ = tax_attention_value
UpperCamelCase__ = tax_pre_attention_layer_norm
UpperCamelCase__ = tax_enc_dec_attention_key
UpperCamelCase__ = tax_enc_dec_attention_out
UpperCamelCase__ = tax_enc_dec_attention_query
UpperCamelCase__ = tax_enc_dec_attention_value
UpperCamelCase__ = tax_cross_layer_norm
if split_mlp_wi:
UpperCamelCase__ = tax_mlp_wi_a
UpperCamelCase__ = tax_mlp_wi_a
else:
UpperCamelCase__ = tax_mlp_wi
UpperCamelCase__ = tax_mlp_wo
UpperCamelCase__ = txa_mlp_layer_norm
UpperCamelCase__ = flax_model_decoder_layer_block
# Decoder Normalization
UpperCamelCase__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
UpperCamelCase__ = txa_decoder_norm
# Only for layer 0:
UpperCamelCase__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
UpperCamelCase__ = tax_decoder_rel_embedding
# Token Embeddings
UpperCamelCase__ = tax_model["target"]["token_embedder"]["embedding"]
UpperCamelCase__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCamelCase__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(__A )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
a__ : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
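# Example invocation (the script name and paths below are placeholders; the flags
# are the ones registered by the argparse parser above):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_model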
| 223 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["""boxes"""], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("""pixel_values""")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["""overflow_to_sample_mapping"""])
        encoded_inputs["""pixel_values"""] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""",
            FutureWarning,
        )
        return self.image_processor
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
    import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text'),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['input_ids'].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['logits']

            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123

            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out['input_ids'].numpy().shape[1]

                assert out_length == max_length
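# Usage sketch (comment-only, mirroring the tests above; "gpt2" is the checkpoint
# they exercise):
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   outputs = tf_tokenizer(tf.constant(["hello world"]))  # dict of tensors incl. "input_ids"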
| 709 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ])
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet')
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond')
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)

        components = {
            'unet': unet,
            'scheduler': scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs['latents'] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 167 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(F"""{round(-1 * my_fir_sum):.1f}""")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(F"""{round(-1 * my_sec_sum):.1f}""")

    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}""")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
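# Minimal worked sketch (helper added for illustration, not in the original):
# first-order Shannon entropy H = -sum(p * log2(p)); for "aab",
# p("a") = 2/3 and p("b") = 1/3 give H ~= 0.918 bits.
def _shannon_entropy(text: str) -> float:
    counts = Counter(text)
    total = len(text)
    return -sum((c / total) * math.log2(c / total) for c in counts.values())


assert round(_shannon_entropy("aab"), 3) == 0.918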
if __name__ == "__main__":
main()
| 469 |
from sklearn.metrics import mean_squared_error
import datasets
__A = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
__A = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
__A = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float')),
                "references": datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                "predictions": datasets.Value('float'),
                "references": datasets.Value('float'),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)

        return {"mse": mse}
| 469 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 525 |
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"""Expected a_coeffs to have {self.order + 1} elements """
                f"""for {self.order}-order filter, got {len(a_coeffs)}"""
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"""Expected b_coeffs to have {self.order + 1} elements """
                f"""for {self.order}-order filter, got {len(b_coeffs)}"""
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
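# Usage sketch (added; the coefficients below are arbitrary placeholders, not a
# designed filter): feed samples one at a time and collect the filtered output.
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.8, 0.81], [0.01, 0.02, 0.01])
    processed = [filt.process(s) for s in (0.0, 1.0, 0.0, 0.0)]
    print(processed)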
| 525 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
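# Cross-check sketch (added): both implementations agree on the 3-4-5 triangle.
assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0
assert float(euclidean_distance([0, 0], [3, 4])) == 5.0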
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
benchmark()
| 29 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
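# Small worked example (added): a palindrome rearrangement exists iff at most one
# character occurs an odd number of times.
assert can_string_be_rearranged_as_palindrome("Momo") is True
assert can_string_be_rearranged_as_palindrome("Mother") is False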
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_a : int = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """embed_dim"""))
        self.parent.assertTrue(hasattr(config, """num_heads"""))
class TFCvtModelTester:
    def __init__( self , parent , batch_size=1_3 , image_size=6_4 , num_channels=3 , embed_dim=[1_6, 4_8, 9_6] , num_heads=[1, 3, 6] , depth=[1, 2, 1_0] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFCvtModel(config=config )
        result = model(pixel_values , training=False )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFCvtModelTester(self )
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=3_7 )

    def test_config( self ):
        """simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    def test_dataset_conversion( self ):
        """simple docstring"""
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    @slow
    def test_keras_fit( self ):
        """simple docstring"""
        super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase =tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
    def test_forward_signature( self ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        """simple docstring"""

        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.92_85, 0.90_15, -0.31_50] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1E-4 ) )
| 709 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
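# A rigid body is in static equilibrium when the net force and the net moment
# acting on it both vanish. The helpers below resolve forces given in polar form
# into Cartesian components, then check that the sum of moments about the origin
# (cross product of each location with its force) is zero within `eps`.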
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment about the origin vanishes within eps."""
    # moments: the moment of each force about the origin
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 339 | 0 |
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True when every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear exactly when AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
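# Illustrative self-check (hypothetical points, not part of the original module):
# three points on the line y = x in the z = 0 plane are collinear, while a point
# lifted off that line is not.
if __name__ == "__main__":
    assert are_collinear((0, 0, 0), (1, 1, 0), (2, 2, 0))
    assert not are_collinear((0, 0, 0), (1, 1, 0), (1, 2, 3))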
| 59 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
a__ = logging.get_logger(__name__)
a__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a__ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
a__ = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
a__ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""" , do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type"""))
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
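    # batch_encode_candidates below pads every candidate to max_length so that the
    # per-question candidate encodings can be stacked into tensors of shape
    # [batch_size, num_candidates, sequence_length].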
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[str] , **lowerCAmelCase : Tuple) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Dict = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[Any] = kwargs.pop("""text_pair""" , lowerCAmelCase)
_snake_case : Union[str, Any] = kwargs.pop("""return_tensors""" , lowerCAmelCase)
_snake_case : Optional[Any] = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(lowerCAmelCase):
if batch_text_pair is not None:
_snake_case : Dict = batch_text_pair[idx]
else:
_snake_case : List[str] = None
_snake_case : Optional[int] = super().__call__(lowerCAmelCase , lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
_snake_case : str = encoded_candidates.get("""input_ids""")
_snake_case : Union[str, Any] = encoded_candidates.get("""attention_mask""")
_snake_case : Any = encoded_candidates.get("""token_type_ids""")
if encoded_input_ids is not None:
output_data["input_ids"].append(lowerCAmelCase)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowerCAmelCase)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowerCAmelCase)
_snake_case : str = {key: item for key, item in output_data.items() if len(lowerCAmelCase) != 0}
return BatchEncoding(lowerCAmelCase , tensor_type=lowerCAmelCase)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=None) -> List[str]:
"""simple docstring"""
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_snake_case : Dict = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
| 477 | 0 |
"""simple docstring"""
import os
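# Project Euler problem 11: find the greatest product of four adjacent numbers in
# a 20x20 grid, scanning to the right, downward, and along both diagonals. Each
# scan leaves 17 valid starting offsets per axis (20 - 4 + 1).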
def _lowerCAmelCase ( ) ->List[str]:
with open(os.path.dirname(UpperCAmelCase__ ) + """/grid.txt""" ) as f:
A__ : str = [] # noqa: E741
for _ in range(2_0 ):
l.append([int(UpperCAmelCase__ ) for x in f.readline().split()] )
A__ : List[str] = 0
# right
for i in range(2_0 ):
for j in range(1_7 ):
A__ : str = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
A__ : Dict = temp
# down
for i in range(1_7 ):
for j in range(2_0 ):
A__ : Dict = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
A__ : List[Any] = temp
# diagonal 1
for i in range(1_7 ):
for j in range(1_7 ):
A__ : Tuple = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
A__ : Tuple = temp
# diagonal 2
for i in range(1_7 ):
for j in range(3, 2_0 ):
A__ : Union[str, Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
A__ : List[str] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 498 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
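# The fast test below runs the variance-exploding SDE pipeline for two steps on a
# tiny random UNet and compares a 3x3 corner of the output image (and of the
# tuple-return variant) against hard-coded reference values.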
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def test_inference( self ):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator , return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline( self ):
        '''simple docstring'''
        model_id = """google/ncsnpp-church-256"""
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 498 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ = prepare_blenderbot_small_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = TFBlenderbotSmallModel(config=UpperCamelCase__ ).get_decoder()
UpperCAmelCase_ = inputs_dict["input_ids"]
UpperCAmelCase_ = input_ids[:1, :]
UpperCAmelCase_ = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ = inputs_dict["head_mask"]
UpperCAmelCase_ = 1
# first forward pass
UpperCAmelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
UpperCAmelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )
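# Helper used by the tester above: fills in default attention masks and head
# masks for the seq2seq model whenever the caller does not supply them.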
def prepare_blenderbot_small_inputs_dict(
    config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotSmallForConditionalGeneration,
            """feature-extraction""": TFBlenderbotSmallModel,
            """summarization""": TFBlenderbotSmallForConditionalGeneration,
            """text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
            """translation""": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = TFBlenderbotSmallModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests( unittest.TestCase ):
    src_text = [
        """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
        """ i'm going to throw up.\nand why is that?"""
    ]
    model_name = """facebook/blenderbot_small-90M"""
    @cached_property
    def tokenizer( self ):
        """simple docstring"""
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )

    @cached_property
    def model( self ):
        """simple docstring"""
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def test_90_generation_from_long_input( self ):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
        assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 660 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
__snake_case : Dict = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__snake_case : Tuple = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class GPTSw3Tokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.non_printing_characters_re.sub("" , UpperCamelCase__ )
# Normalize whitespaces
UpperCAmelCase_ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase_ = unicodedata.normalize("NFC" , UpperCamelCase__ )
return text
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__ ) -> str:
"""simple docstring"""
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string
def lowerCamelCase_ ( self ) -> Dict[str, int]:
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
else:
UpperCAmelCase_ = [self.preprocess_text(UpperCamelCase__ ) for t in text]
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase_ = torch.tensor(UpperCamelCase__ )
return token_ids
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.decode(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCamelCase__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=UpperCamelCase__ )
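# Minimal round-trip sketch (illustrative only; assumes the checkpoint and its
# SentencePiece model are available):
#
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tok.encode_fast("Hej, hur mår du?")
#   text = tok.decode_fast(ids)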
| 660 | 1 |
"""simple docstring"""
__A : List[Any] = [0, 2, 4, 6, 8]
__A : List[Any] = [1, 3, 5, 7, 9]
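# Project Euler problem 145: a number n is "reversible" when n + reverse(n)
# consists entirely of odd digits (and n has no leading or trailing zero). The
# search below fills the digit list pairwise from the outside in, so mirrored
# digit pairs must have opposite parity to keep every column sum odd.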
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    '''Count reversible numbers of the given length with this partial digit list.'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result


def solution(max_power: int = 9 ) -> int:
    '''Count all reversible numbers below 10**max_power.'''
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 719 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__A : str = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
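# Scripts listed in EXCLUDE_EXAMPLES are skipped by `one_complete_example` below
# when diffing the by_feature scripts against the complete examples.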
class ExampleDifferenceTests(unittest.TestCase):
    """simple docstring"""

    def one_complete_example( self , complete_file_name: str , parser_only: bool , secondary_filename: str = None , special_strings: list = None ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
        examples_path = os.path.abspath('''examples''' )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section='''main()''' if parser_only else '''training_function()''' , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , parser_only , secondary_filename )
                        diff = '''\n'''.join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , '''''' )
                        self.assertEqual(diff , '''''' )
def lowercase__ ( self : Tuple )->Any:
self.one_complete_example('''complete_nlp_example.py''' , __UpperCamelCase )
self.one_complete_example('''complete_nlp_example.py''' , __UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->int:
_UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
_UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.one_complete_example('''complete_cv_example.py''' , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class FeatureExamplesTests(TempDirTestCase):
    """simple docstring"""

    clear_on_setup = False

    @classmethod
    def setUpClass( cls ):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
def lowercase__ ( self : Optional[int] )->Any:
_UpperCAmelCase = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def lowercase__ ( self : Optional[int] )->Optional[int]:
_UpperCAmelCase = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
_UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def lowercase__ ( self : Optional[Any] )->List[Any]:
_UpperCAmelCase = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
_UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase )
self.assertNotIn('''epoch 0:''' , __UpperCamelCase )
self.assertIn('''epoch 1:''' , __UpperCamelCase )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
_UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase )
if torch.cuda.is_available():
_UpperCAmelCase = torch.cuda.device_count()
else:
_UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __UpperCamelCase )
self.assertIn('''epoch 1:''' , __UpperCamelCase )
else:
self.assertIn('''epoch 0:''' , __UpperCamelCase )
self.assertIn('''epoch 1:''' , __UpperCamelCase )
@slow
def lowercase__ ( self : Dict )->List[Any]:
_UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
_UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase )
_UpperCAmelCase = re.findall('''({.+})''' , __UpperCamelCase )
_UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
_UpperCAmelCase = ast.literal_eval(__UpperCamelCase )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def lowercase__ ( self : Any )->List[Any]:
_UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Optional[int] )->Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
_UpperCAmelCase = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''tracking''' ) ) )
def lowercase__ ( self : Dict )->Dict:
_UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def lowercase__ ( self : Union[str, Any] )->Tuple:
_UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 95 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("""add_bos_token""" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
def lowerCAmelCase ( self : Optional[int] , *__snake_case : Tuple , **__snake_case : Optional[int] )-> BatchEncoding:
snake_case = kwargs.get("""is_split_into_words""" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase ( self : Any , *__snake_case : Tuple , **__snake_case : Tuple )-> BatchEncoding:
snake_case = kwargs.get("""is_split_into_words""" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any = None )-> Tuple[str]:
snake_case = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] , __snake_case : int )-> List[int]:
snake_case = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [self.eos_token_id] )
if len(__lowerCamelCase ) > self.model_max_length:
snake_case = input_ids[-self.model_max_length :]
return input_ids
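# Illustrative note (hypothetical call, not part of this module): pretokenized
# inputs require add_prefix_space=True so each word is encoded as if preceded
# by a space, e.g.
#
#   tok = GPTaTokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   enc = tok(["Hello", "world"], is_split_into_words=True)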
| 369 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_small_integration_test(self):
        """simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 647 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname ,IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
            json.dump(image_processor_map ,fp )

    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname ,**kwargs )

    def get_image_processor( self ,**kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname ,**kwargs )

    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : int = self.get_image_processor()
lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Dict = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : int = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_image_processor()
lowercase_ : int = self.get_tokenizer()
lowercase_ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Union[str, Any] = self.prepare_image_inputs()
lowercase_ : Optional[Any] = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Tuple = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_image_processor()
lowercase_ : Tuple = self.get_tokenizer()
lowercase_ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Any = 'lower newer'
lowercase_ : str = processor(text=__UpperCamelCase )
lowercase_ : Dict = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 477 | """simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax_tiny(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.151_4745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 4_9947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0565_2401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 238_3808.2) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0400_3906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 237_3516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0400_3906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 237_3516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        '''simple docstring'''
        scheduler = FlaxDDIMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=False, steps_offset=1,)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None,)
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.0_4504_3945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 234_7693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        '''simple docstring'''
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None,)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True,)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - image_slice).max() < 1e-2
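

# The tests above all follow the same JAX data-parallel recipe: `replicate` the
# pipeline parameters across devices, `shard` the inputs, and split the PRNG key
# so each device gets its own randomness. A self-contained sketch of that
# pattern (the toy function and shapes are illustrative assumptions, not part
# of the pipeline API):
def _data_parallel_sketch():
    n_dev = jax.device_count()
    rngs = jax.random.split(jax.random.PRNGKey(0), n_dev)  # one key per device
    xs = jnp.zeros((n_dev, 1, 8))  # leading axis indexes devices, like `shard` produces
    add_noise = jax.pmap(lambda key, x: x + jax.random.normal(key, x.shape))
    return add_noise(rngs, xs)  # runs once per device, results stacked on axis 0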
| 477 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase_ = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["DPTFeatureExtractor"]
lowerCamelCase_ = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 498 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.")
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
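    # Example invocation (the script filename and paths here are hypothetical):
    #   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan-tf --output ./gptsan.pt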
    convert_tf_gptsan_to_pt(args)
 | 498 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
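

# A minimal export sketch built on the ONNX config above, using the legacy
# `transformers.onnx` export API; the checkpoint name and output path are
# illustrative assumptions, not part of this module:
def _export_to_onnx_sketch():
    from pathlib import Path

    from transformers import AutoFeatureExtractor, Data2VecVisionModel
    from transformers.onnx import export

    model = Data2VecVisionModel.from_pretrained("facebook/data2vec-vision-base")
    preprocessor = AutoFeatureExtractor.from_pretrained("facebook/data2vec-vision-base")
    onnx_config = Data2VecVisionOnnxConfig(model.config)
    # returns the matched ONNX input and output names after exporting to model.onnx
    return export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))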
| 279 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,))
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,)
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1_000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",)
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,))
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,)
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
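

# A minimal inference sketch for the pipeline exercised above; the memory-saving
# calls mirror the integration tests, and the prompt/output handling is
# illustrative rather than prescriptive:
def _stable_unclip_sketch():
    pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
    pipe.enable_attention_slicing()  # trade a little speed for lower peak memory
    pipe.enable_sequential_cpu_offload()  # keep weights on CPU until each module is needed
    return pipe("anime turtle", output_type="np").images[0]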
| 279 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    '''simple docstring'''
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
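

# For example (an illustrative input/output pair of the renaming rules above):
#   rename_key("img_encoder.layers.0.blocks.0.norm1.weight")
#   -> "vision_model.encoder.stages.0.layers.0.layer_norm1.weight"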
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="groupvit-gcc-yfcc" , __lowerCAmelCase=False ) -> int:
'''simple docstring'''
lowercase_ = GroupViTConfig()
lowercase_ = GroupViTModel(__lowerCAmelCase ).eval()
lowercase_ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""]
lowercase_ = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ , lowercase_ = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__lowerCAmelCase ) == 0)
# verify result
lowercase_ = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase_ = prepare_img()
lowercase_ = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""" )
with torch.no_grad():
lowercase_ = model(**__lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
lowercase_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , __lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
print("""Successfully saved processor and model to""" , __lowerCAmelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(__lowerCAmelCase , organization="""nielsr""" )
model.push_to_hub(__lowerCAmelCase , organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 567 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """simple docstring"""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """simple docstring"""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
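

# A minimal usage sketch (illustrative; the tool downloads the default
# checkpoint on first use, and the base class routes a call through
# encode -> forward -> decode):
#   tool = TranslationTool()
#   print(tool("Hello, how are you?", src_lang="English", tgt_lang="French"))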
| 567 | 1 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length, remainder, digits, length) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,)
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
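

# Brute-force cross-check for small inputs (illustrative, not part of the
# original solution): n is "reversible" when n has no trailing zero and every
# digit of n + reverse(n) is odd. For instance,
# sum(_is_reversible(n) for n in range(1, 1_000)) == 120 == solution(3).
def _is_reversible(n: int) -> bool:
    if n % 10 == 0:
        return False
    return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))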
if __name__ == "__main__":
print(F'''{solution() = }''')
| 713 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
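

# Comparing x * log10(a) keeps the arithmetic tiny instead of evaluating a**x,
# which can have millions of digits. For example: 2**11 = 2048 < 3**7 = 2187,
# and indeed 11 * log10(2) is about 3.311, less than 7 * log10(3), about 3.340.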
if __name__ == "__main__":
print(solution())
| 680 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=1_3,
        patch_size=2,
        max_length=2_4,
        num_mel_bins=1_6,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride,)

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=3_7)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset")
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 155 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n, k):
    result = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment, total_number, level, current_list, total_list,):
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list):
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
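
    # Cross-check against the standard library: the recursive generator emits
    # the same k-combinations of 1..n, in the same lexicographic order.
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]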
| 155 | 1 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """simple docstring"""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 421 |
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
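# Example invocation (script name and paths are illustrative placeholders):
#   python convert_xglm_original_ckpt_to_transformers.py /path/to/model.pt ./xglm-converted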
| 421 | 1 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
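    # Sanity check against the known Project Euler 46 answer: the smallest odd
    # composite that cannot be written as a prime plus twice a square is 5777.
    assert solution() == 5777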
| 10 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
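# For orientation, the mechanism exercised above in a nutshell:
# ``add_hook_to_module`` swaps ``module.forward`` for a wrapper that runs
# ``hook.pre_forward`` on the inputs and ``hook.post_forward`` on the output,
# e.g. (a minimal sketch):
#
#     model = ModelForTest()
#     add_hook_to_module(model, PreForwardHook())
#     model(torch.randn(2, 3))  # the wrapped forward now sees x + 1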
| 21 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "timesformer"
def __init__(self , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=8 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1e-6 , _lowercase=True , _lowercase="divided_space_time" , _lowercase=0 , **_lowercase , ):
'''simple docstring'''
super().__init__(**_lowercase )
__a : Union[str, Any] = image_size
__a : Optional[Any] = patch_size
__a : Union[str, Any] = num_channels
__a : Optional[int] = num_frames
__a : Optional[int] = hidden_size
__a : Any = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[Any] = intermediate_size
__a : Dict = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Tuple = layer_norm_eps
__a : Optional[Any] = qkv_bias
__a : Union[str, Any] = attention_type
__a : Optional[Any] = drop_path_rate
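# Minimal usage sketch (illustrative values):
#
#     config = TimesformerConfig(num_frames=16)
#     assert config.attention_type == "divided_space_time"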
| 63 |
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def __magic_name__ ( ):
__a : Tuple = []
with open(os.path.dirname(_lowerCamelCase ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
__a : Tuple = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
return largest_product(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
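    # Quick self-check of largest_product on a hypothetical 4x4 grid: the best
    # run of four is the bottom row, 13 * 14 * 15 * 16 = 43680.
    demo_grid = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    assert largest_product(demo_grid) == 43680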
| 63 | 1 |
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
 | 57 |
"""simple docstring"""
import os
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
with open(os.path.dirname(snake_case ) + "/p022_names.txt" ) as file:
UpperCAmelCase__ : Tuple = str(file.readlines()[0] )
UpperCAmelCase__ : str = names.replace("\"" , "" ).split("," )
names.sort()
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[str] = 0
for i, name in enumerate(snake_case ):
for letter in name:
name_score += ord(snake_case ) - 64
total_score += (i + 1) * name_score
UpperCAmelCase__ : Optional[int] = 0
return total_score
if __name__ == "__main__":
print(solution())
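    # Worked example from the problem statement: "COLIN" scores
    # 3 + 15 + 12 + 9 + 14 = 53; as the 938th sorted name it contributes 938 * 53 = 49714.
    assert sum(ord(letter) - 64 for letter in "COLIN") == 53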
| 438 | 0 |
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
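    # Example: 25 is 0b11001 and 32 is 0b100000, so their bitwise OR is 0b111001.
    assert binary_or(25, 32) == "0b111001"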
| 706 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
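# For reference, the mappings this script keeps sorted look like:
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("albert", "AlbertModel"),
#             ("bart", "BartModel"),
#         ]
#     )
#
# where entries are ordered alphabetically by the quoted identifier on each line.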
| 198 | 0 |
"""Fine-tune seq2seq models with the legacy ``Seq2SeqTrainer``."""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split (train/val/test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
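# Example invocation (model name, data and output paths are placeholders):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./my_data \
#       --output_dir ./output --do_train --do_eval --predict_with_generate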
| 41 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
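# Hypothetical usage sketch (the exact calling convention depends on the
# agents/tools runtime; the names below are illustrative):
#
#     classifier = TextClassificationTool()
#     classifier.setup()
#     print(classifier("This is a great movie!", labels=["positive", "negative"]))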
| 41 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
if b == 0:
return (1, 0)
((A__) , (A__)) : Union[str, Any] =extended_euclid(UpperCamelCase , a % b )
A__ : Dict =a // b
return (y, x - k * y)
def lowercase ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
((A__) , (A__)) : Dict =extended_euclid(UpperCamelCase , UpperCamelCase )
A__ : Dict =na * na
A__ : Any =ra * x * na + ra * y * na
return (n % m + m) % m
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
((A__) , (A__)) : Any =extended_euclid(UpperCamelCase , UpperCamelCase )
if b < 0:
A__ : Tuple =(b % n + n) % n
return b
def lowercase ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
A__ , A__ : int =invert_modulo(UpperCamelCase , UpperCamelCase ), invert_modulo(UpperCamelCase , UpperCamelCase )
A__ : Any =na * na
A__ : Any =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
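    # Worked example: x = 1 (mod 5) and x = 3 (mod 7) has the unique solution
    # x = 31 modulo 35, whichever of the two implementations is used.
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31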
 | 595 | 
"""Dual Transformer2D model: runs two transformers over two condition streams and mixes the results."""

from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None,
        num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None, attention_bias: bool = False,
        sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout,
                    norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None,
        cross_attention_kwargs=None, return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 595 | 1 |
"""Greedy solution to the fractional knapsack problem."""

from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
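    # Example: with values [60, 100, 120], weights [10, 20, 30] and capacity 50,
    # the greedy optimum takes items 1 and 2 whole plus two thirds of item 3.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240
    assert fractions == [1, 1, 2 / 3]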
| 41 |
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed weight name onto the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 708 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05,
        use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False,
        num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4,
        router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
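

# --- Illustrative usage sketch (not part of the original module) ---
# Builds a toy config with deliberately small, made-up sizes; only `router_dtype`
# is validated eagerly in __init__ above, everything else is stored as-is.
def _nllb_moe_config_demo():
    config = NllbMoeConfig(encoder_layers=2, decoder_layers=2, num_experts=2)
    assert config.router_dtype == "float32"  # default passes validation
    try:
        NllbMoeConfig(router_dtype="int8")  # rejected by the check in __init__
    except ValueError:
        pass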
| 379 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
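

# --- Illustrative helper (not from this script) ---
# The width/depth coefficients above drive EfficientNet's compound scaling. This
# sketch mirrors the reference implementation's filter rounding: channel counts
# are scaled by `width_coef` and snapped to a multiple of 8, never dropping more
# than 10% below the scaled value.
def _round_filters_demo(num_channels: int, width_coef: float, divisor: int = 8) -> int:
    channels = num_channels * width_coef
    new_dim = max(divisor, int(channels + divisor / 2) // divisor * divisor)
    if new_dim < 0.9 * channels:  # rounding must not lose more than 10%
        new_dim += divisor
    return new_dim


# e.g. _round_filters_demo(32, CONFIG_MAP["b2"]["width_coef"]) == 32, while
# _round_filters_demo(32, CONFIG_MAP["b7"]["width_coef"]) == 64.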
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
_lowercase = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # Add prediction head
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
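

# --- Illustrative sketch (not part of the original script) ---
# Why the permutes above: TF stores conv kernels as HWIO (height, width, in, out)
# while PyTorch expects OIHW; depthwise kernels go HWCM -> (C, M, H, W).
def _kernel_layout_demo():
    hwio = np.zeros((3, 3, 4, 8), dtype=np.float32)
    oihw = torch.from_numpy(hwio).permute(3, 2, 0, 1)
    assert tuple(oihw.shape) == (8, 4, 3, 3)
    hwcm = np.zeros((3, 3, 4, 1), dtype=np.float32)
    depthwise = torch.from_numpy(hwcm).permute(2, 3, 0, 1)
    assert tuple(depthwise.shape) == (4, 1, 3, 3)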
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 67 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text
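
    # Illustrative example (not in the original file): preprocess_text strips
    # non-printing control characters and applies NFC recomposition, e.g.
    #     tokenizer.preprocess_text("hej\x07 e\u0301") == "hej é"
    # (U+0007 falls in the stripped control range; "e" + combining acute
    # recomposes into a single "é").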
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string; special tokens stay intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids using the raw SP tokenizer; faster but with reduced functionality."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids using the raw SP tokenizer; faster but with reduced functionality."""
        return self.sp_model.decode(token_ids)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
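

# --- Illustrative sketch (not part of the original file) ---
# The conversation prompt assembled in _build_conversation_input_ids, written out
# for a one-turn chat with the default special tokens (eos "<|endoftext|>", bos "<s>"):
# eos + bos, bos-joined turns, and a trailing "Bot:" cue for generation.
def _conversation_prompt_demo():
    eos, bos = "<|endoftext|>", "<s>"
    turns = ["User: Hej!"]
    prompt = f"{eos}{bos}" + f"{bos}".join(turns) + f"{bos}Bot:"
    assert prompt == "<|endoftext|><s>User: Hej!<s>Bot:"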
| 442 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)
    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 80 |
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
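

# --- Illustrative sketch (not part of the original script) ---
# md_prefix(0) opens a new markdown section; deeper levels become indented bullets.
def _md_prefix_demo():
    assert md_prefix(0) == "\n##"
    assert md_prefix(1) == "  *"
    assert md_prefix(2) == "    *"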
if __name__ == "__main__":
print_directory_md('.')
| 80 | 1 |