"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""CLIPFeatureExtractor"""]
lowercase__ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
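# How the lazy init works: at import time only the cheap `_import_structure`
# dict is built and `_LazyModule` replaces this module in `sys.modules`; the
# heavy submodules are imported on first attribute access. Illustrative
# behaviour (assuming transformers is installed with torch):
#
#     from transformers.models import clip  # cheap, no torch-heavy import yet
#     model_cls = clip.CLIPModel            # now modeling_clip is imported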
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
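# The tests above can be run in isolation with pytest; the path below is
# illustrative and depends on where this file sits in the transformers checkout:
#
#     python -m pytest tests/models/dpt/test_image_processing_dpt.py -q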
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'],
}
# fmt: on
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
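# Usage sketch (the checkpoint name comes from the pretrained maps above;
# downloading it requires network access):
#
#     from transformers import M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained(
#         "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     forced_bos = tokenizer.get_lang_id("fr")  # forced BOS id for generation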
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
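# Import-guard pattern: when torch/transformers are missing, the dummy
# `ShapEPipeline` placeholder is imported instead of the real pipeline, so the
# package always imports cleanly and only fails when the pipeline is actually
# used. Sketch of the observable behaviour (error text paraphrased, not verbatim):
#
#     from diffusers.pipelines.shap_e import ShapEPipeline
#     ShapEPipeline.from_pretrained("...")  # raises if the backends are absent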
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
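# Example invocation (the script filename and paths are placeholders; the flags
# match the argparse definitions above):
#
#     python convert_encodec_checkpoint_to_pytorch.py \
#         --model encodec_24khz \
#         --checkpoint_path encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec_24khz_converted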
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
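# Minimal usage sketch (RunNamer and its defaults are hypothetical, chosen only
# to illustrate the API):
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 1e-3, "num_layers": 2}
#
#     name = RunNamer.shortname({"learning_rate": 3e-4, "num_layers": 2})
#     # non-default params are encoded, e.g. something like "run_l0.0003"
#     params = RunNamer.parse_repr(name)  # round-trips back to a full params dict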
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        # sklearn's signature is recall_score(y_true, y_pred, ...), so references come first
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
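# Usage sketch (checkpoint name taken from the pretrained maps above; requires
# network access to download):
#
#     from transformers import MBartTokenizerFast
#
#     tok = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tok("UN Chief Says There Is No Plan", return_tensors="pt")
#     # input_ids end with [eos, src_lang_code], per set_src_lang_special_tokens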
from typing import Any
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = data
SCREAMING_SNAKE_CASE : int = None
def __repr__( self : str ) -> str:
"""simple docstring"""
return F"Node({self.data})"
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = None
def __iter__( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE : List[str] = node.next
def __len__( self : str ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
return "->".join([str(a ) for item in self] )
def __getitem__( self : List[Any] , a : int ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Tuple , a : int , a : Any ) -> None:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
SCREAMING_SNAKE_CASE : str = self.head
for _ in range(a ):
SCREAMING_SNAKE_CASE : str = current.next
SCREAMING_SNAKE_CASE : Any = data
def __UpperCamelCase ( self : List[str] , a : Any ) -> None:
"""simple docstring"""
self.insert_nth(len(self ) , a )
def __UpperCamelCase ( self : Union[str, Any] , a : Any ) -> None:
"""simple docstring"""
self.insert_nth(0 , a )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Any ) -> None:
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
SCREAMING_SNAKE_CASE : Any = Node(a )
if self.head is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_node
elif index == 0:
SCREAMING_SNAKE_CASE : Optional[int] = self.head # link new_node to head
SCREAMING_SNAKE_CASE : List[Any] = new_node
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Optional[int] = temp.next
SCREAMING_SNAKE_CASE : Optional[int] = temp.next
SCREAMING_SNAKE_CASE : int = new_node
def __UpperCamelCase ( self : Optional[int] ) -> None: # print every node data
"""simple docstring"""
print(self )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
return self.delete_nth(0 )
def __UpperCamelCase ( self : Any ) -> Any: # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def __UpperCamelCase ( self : List[str] , a : int = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
SCREAMING_SNAKE_CASE : Tuple = self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE : List[str] = self.head.next
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Any = temp.next
SCREAMING_SNAKE_CASE : List[Any] = temp.next
SCREAMING_SNAKE_CASE : List[str] = temp.next.next
return delete_node.data
def __UpperCamelCase ( self : List[Any] ) -> bool:
"""simple docstring"""
return self.head is None
def __UpperCamelCase ( self : Optional[int] ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : str = self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE : Any = current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE : List[Any] = prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE : Any = current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE : str = next_node
# Return prev in order to put the head at the end
SCREAMING_SNAKE_CASE : Optional[Any] = prev
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(_a) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10):
assert len(_a) == i
linked_list.insert_nth(_a , i + 1)
assert str(_a) == "->".join(str(_a) for i in range(1 , 11))
linked_list.insert_head(0)
linked_list.insert_tail(11)
assert str(_a) == "->".join(str(_a) for i in range(0 , 12))
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9) == 10
assert linked_list.delete_tail() == 11
assert len(_a) == 9
assert str(_a) == "->".join(str(_a) for i in range(1 , 10))
assert all(linked_list[i] == i + 1 for i in range(0 , 9)) is True
for i in range(0 , 9):
SCREAMING_SNAKE_CASE : str = -i
assert all(linked_list[i] == -i for i in range(0 , 9)) is True
linked_list.reverse()
assert str(_a) == "->".join(str(_a) for i in range(-8 , 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node at a specific location in the linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
"""simple docstring"""
import os
from collections.abc import Iterator
def UpperCAmelCase__ ( lowerCAmelCase__ :str = "." ) -> str:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(lowerCAmelCase__ ):
lowercase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowerCAmelCase__ )[1] in (".py", ".ipynb"):
yield os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ).lstrip("""./""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Any:
'''simple docstring'''
return f'{i * " "}*' if i else "\n##"
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowerCAmelCase__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(lowerCAmelCase__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def UpperCAmelCase__ ( lowerCAmelCase__ :str = "." ) -> Optional[int]:
'''simple docstring'''
lowercase = """"""
for filepath in sorted(good_file_paths(lowerCAmelCase__ ) ):
lowercase = os.path.split(lowerCAmelCase__ )
if filepath != old_path:
lowercase = print_path(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
lowercase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(lowerCAmelCase__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md(""".""")
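
# Illustration (added; the file and directory names below are hypothetical):
# for a tree containing maths/prime_check.py and sorts/bubble_sort.py,
# print_directory_md(".") prints Markdown like:
#
#   ## Maths
#     * [Prime Check](maths/prime_check.py)
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)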
"""Convert GroupViT checkpoints from the original repository. URL: https://github.com/NVlabs/GroupViT"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's attention
            # layers require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's attention
            # layers require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
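
def _qkv_split_demo() -> None:
    """Illustrative sketch, not part of the original script: the slicing used in
    convert_state_dict above, applied to a dummy fused (3 * dim, dim) projection."""
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
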
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original checkpoint's weights to our GroupViT structure."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
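
def _transpose_demo() -> None:
    """Illustrative sketch, not part of the original script: PyTorch Linear
    weights are stored as (out_features, in_features) while TF1 dense kernels
    are (in_features, out_features), which is why the names listed in
    `tensors_to_transpose` above are flipped with .T before export."""
    w_pt = np.zeros((768, 3072))  # PyTorch layout: (out, in)
    w_tf = w_pt.T  # TF kernel layout: (in, out)
    assert w_tf.shape == (3072, 768)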
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def A (self : Tuple ):
A = {}
A = []
A = 1
A = [1, 2]
A = {"""a""": 1, """b""": 2}
A = {"""a""": [1, 2], """b""": [3, 4]}
A = {"""a""": {"""1""": 1}, """b""": 2}
A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
A = {}
A = []
A = 2
A = [2, 3]
A = {"""a""": 2, """b""": 3}
A = {"""a""": [2, 3], """b""": [4, 5]}
A = {"""a""": {"""1""": 2}, """b""": 3}
A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
A = {"""a""": 2, """b""": 0, """c""": 2}
A = {
"""a""": np.eye(2 ).astype(_lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(_lowerCAmelCase ),
"""c""": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCAmelCase : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
def A (self : List[Any] ):
A = {"""a""": 1, """b""": 2}
A = {"""a""": 3, """b""": 4}
A = {"""a""": 5, """b""": 6}
A = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = '''bar'''
A = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(_lowerCAmelCase , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
A = {f"""{i}""": i for i in range(UpperCAmelCase )}
A = map_nested(lambda UpperCAmelCase : x + 10 , UpperCAmelCase , num_proc=UpperCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@require_tf
def A (self : Dict ):
import tensorflow as tf
from tensorflow.keras import layers
A = layers.Dense(2 )
def gen_random_output():
A = tf.random.uniform((1, 3) )
return model(_lowerCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def A (self : Tuple ):
import torch
def gen_random_output():
A = torch.nn.Linear(3 , 2 )
A = torch.rand(1 , 3 )
return model(_lowerCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def A (self : str ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A = gen_random_output()
with temp_seed(42 ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).flatten()
assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
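
def _map_nested_demo() -> None:
    """Illustrative sketch, not part of the original tests: map_nested applies a
    function to every leaf of a nested structure while preserving its shape,
    which is the behaviour the assertions above exercise."""
    assert map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}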
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowercase_ = (3, 9, -11, 0, 7, 5, 1, -1)
lowercase_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A :
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
class A :
"""simple docstring"""
def __init__( self : List[str],lowercase_ : Iterable[int] )-> None:
'''simple docstring'''
A__ = None
for i in sorted(lowercase_,reverse=lowercase_ ):
A__ = Node(lowercase_,self.head )
def __iter__( self : List[str] )-> Iterator[int]:
'''simple docstring'''
A__ = self.head
while node:
yield node.data
A__ = node.next_node
def __len__( self : str )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self : Optional[int] )-> str:
'''simple docstring'''
return " -> ".join([str(lowercase_ ) for node in self] )
def _snake_case( SCREAMING_SNAKE_CASE__ : SortedLinkedList , SCREAMING_SNAKE_CASE__ : SortedLinkedList ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(SCREAMING_SNAKE_CASE__ ) + list(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
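
def _two_pointer_merge_sketch(one: SortedLinkedList, two: SortedLinkedList) -> list[int]:
    """Illustrative sketch, not the original merge_lists: since both inputs are
    already sorted, a two-pointer merge runs in O(m + n) instead of the
    O((m + n) log(m + n)) re-sort used above."""
    a, b = list(one), list(two)
    i = j = 0
    merged: list[int] = []
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    merged.extend(a[i:])  # append whichever tail remains
    merged.extend(b[j:])
    return merged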
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    a = 3
    result = 0
    # NB: a multiple of 15 is already a multiple of 3, so the first branch
    # covers it; no separate case is needed.
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = VideoClassificationPipeline(model=UpperCamelCase__ , image_processor=UpperCamelCase__ , top_k=2 )
UpperCamelCase = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def A ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
for example in examples:
UpperCamelCase = video_classifier(UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{'score': ANY(UpperCamelCase__ ), 'label': ANY(UpperCamelCase__ )},
{'score': ANY(UpperCamelCase__ ), 'label': ANY(UpperCamelCase__ )},
] , )
@require_torch
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCamelCase = VideoMAEFeatureExtractor(
size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} )
UpperCamelCase = pipeline(
'video-classification' , model=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , frame_sampling_rate=4 )
UpperCamelCase = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = video_classifier(UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}] , )
UpperCamelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
] , )
@require_tf
def A ( self : List[Any] ):
"""simple docstring"""
pass
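
# Illustration (added; the checkpoint name below is an assumption, not part of
# these tests): the same pipeline can be used directly, e.g.
#
#   classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   classifier("archery.mp4", top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]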
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """informer"""
_SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : str = "student_t" , SCREAMING_SNAKE_CASE_ : str = "nll" , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : List[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : int = 6_4 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : str = "gelu" , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : int = 1_0_0 , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str = "prob" , SCREAMING_SNAKE_CASE_ : int = 5 , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : int , ):
# time series specific configuration
lowerCAmelCase_ : Dict = prediction_length
lowerCAmelCase_ : List[str] = context_length or prediction_length
lowerCAmelCase_ : List[Any] = distribution_output
lowerCAmelCase_ : int = loss
lowerCAmelCase_ : Optional[int] = input_size
lowerCAmelCase_ : Tuple = num_time_features
lowerCAmelCase_ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase_ : int = scaling
lowerCAmelCase_ : List[Any] = num_dynamic_real_features
lowerCAmelCase_ : Union[str, Any] = num_static_real_features
lowerCAmelCase_ : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
lowerCAmelCase_ : str = cardinality
else:
lowerCAmelCase_ : Any = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
lowerCAmelCase_ : Optional[int] = embedding_dimension
else:
lowerCAmelCase_ : Union[str, Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase_ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase_ : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase_ : Any = d_model
lowerCAmelCase_ : Union[str, Any] = encoder_attention_heads
lowerCAmelCase_ : Optional[Any] = decoder_attention_heads
lowerCAmelCase_ : Any = encoder_ffn_dim
lowerCAmelCase_ : List[str] = decoder_ffn_dim
lowerCAmelCase_ : Optional[Any] = encoder_layers
lowerCAmelCase_ : Tuple = decoder_layers
lowerCAmelCase_ : Optional[int] = dropout
lowerCAmelCase_ : Dict = attention_dropout
lowerCAmelCase_ : int = activation_dropout
lowerCAmelCase_ : Dict = encoder_layerdrop
lowerCAmelCase_ : str = decoder_layerdrop
lowerCAmelCase_ : Union[str, Any] = activation_function
lowerCAmelCase_ : Union[str, Any] = init_std
lowerCAmelCase_ : Union[str, Any] = use_cache
# Informer
lowerCAmelCase_ : Optional[int] = attention_type
lowerCAmelCase_ : Any = sampling_factor
lowerCAmelCase_ : int = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
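
def _informer_config_demo() -> None:
    """Illustrative sketch, not part of the original module: feature_size is
    derived in __init__ from the lags sequence plus the extra feature count."""
    config = InformerConfig(prediction_length=24)
    assert config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features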
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
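
def relu_derivative(vector) -> np.ndarray:
    """Illustrative addition, not in the original file: the ReLU subgradient
    (1 where x > 0, else 0), the piece usually paired with relu() in backprop."""
    return (np.asarray(vector) > 0).astype(float)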
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : int =NllbTokenizer
A__ : Optional[int] =NllbTokenizerFast
A__ : Union[str, Any] =True
A__ : Dict =True
A__ : Tuple ={}
def A_ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ = NllbTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = NllbTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
@require_torch
def A_ ( self : Tuple ):
if not self.test_seqaseq:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
SCREAMING_SNAKE_CASE__ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE__ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase_ , tgt_texts=UpperCAmelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch(
UpperCAmelCase_ , tgt_texts=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , UpperCAmelCase_ )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def A_ ( self : List[Any] ):
pass
def A_ ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE__ = [AddedToken('<special>' , lstrip=UpperCAmelCase_ )]
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode('Hey this is a <special> token' )
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode('<special>' , add_special_tokens=UpperCAmelCase_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.encode('Hey this is a <special> token' )
SCREAMING_SNAKE_CASE__ = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
A__ : List[Any] ="""facebook/nllb-200-distilled-600M"""
A__ : Tuple =[
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
A__ : Optional[Any] =[
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
A__ : Optional[int] =[
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def A_ ( cls : Tuple ):
SCREAMING_SNAKE_CASE__ = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
SCREAMING_SNAKE_CASE__ = 1
return cls
def A_ ( self : int ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 256057 )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
def A_ ( self : Dict ):
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids )
# fmt: off
SCREAMING_SNAKE_CASE__ = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Optional[Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [256203, 3] )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = NllbTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ )
@require_torch
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = targets['input_ids']
SCREAMING_SNAKE_CASE__ = shift_tokens_right(
UpperCAmelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[256047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 256057,
} , )
@require_torch
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
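
# Illustration (added, summarizing the behaviour asserted above): with
# legacy_behaviour=True the source language code is appended after </s>,
# e.g. [..., 2, 256047]; with legacy_behaviour=False it is prepended,
# e.g. [256047, ..., 2].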
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
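
def _fib_digits_fast_sketch(n: int = 1000) -> int:
    """Illustrative sketch, not the original solution: fibonacci() above rebuilds
    the whole sequence on every call, making the scan quadratic; keeping two
    running values makes it linear in the answer's index."""
    a, b = 0, 1  # F(0), F(1)
    index = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index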
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
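
# Illustration (added; the test name below is hypothetical): a test can consume
# the fixture chain above, e.g.
#
#   def test_dummy_dataset_dir(dataset_loading_script_dir):
#       script = os.path.join(dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py")
#       assert os.path.isfile(script)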
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
with open(os.path.join(UpperCamelCase__ , '''refs''' , '''main''' ) ) as f:
SCREAMING_SNAKE_CASE : List[Any] = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''snapshots''' , UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# File is cached at the same place the second time.
SCREAMING_SNAKE_CASE : Union[str, Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Using a specific revision to test the full commit hash.
SCREAMING_SNAKE_CASE : List[Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision='''9b8c223''' )
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''snapshots''' , UpperCamelCase__ , UpperCamelCase__ ) )
def __A ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , '''is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : Optional[int] = cached_file('''tiny-random-bert''' , UpperCamelCase__ )
with self.assertRaisesRegex(UpperCamelCase__ , '''is not a valid git identifier''' ):
SCREAMING_SNAKE_CASE : Tuple = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision='''aaaa''' )
with self.assertRaisesRegex(UpperCamelCase__ , '''does not appear to have a file named''' ):
SCREAMING_SNAKE_CASE : str = cached_file(UpperCamelCase__ , '''conf''' )
def __A ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , '''does not appear to have a file named''' ):
SCREAMING_SNAKE_CASE : Any = cached_file(UpperCamelCase__ , '''conf''' )
with open(os.path.join(UpperCamelCase__ , '''refs''' , '''main''' ) ) as f:
SCREAMING_SNAKE_CASE : str = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''.no_exist''' , UpperCamelCase__ , '''conf''' ) ) )
SCREAMING_SNAKE_CASE : int = cached_file(UpperCamelCase__ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = cached_file(UpperCamelCase__ , '''conf''' , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = mock.Mock()
SCREAMING_SNAKE_CASE : str = 500
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : Optional[int] = HTTPError
SCREAMING_SNAKE_CASE : Dict = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase__ ) as mock_head:
SCREAMING_SNAKE_CASE : str = cached_file(UpperCamelCase__ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self : Any ):
'''simple docstring'''
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase__ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase__ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase__ ) )
def __A ( self : Dict ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , UpperCamelCase__ , revision='''ahaha''' )
SCREAMING_SNAKE_CASE : Any = get_file_from_repo('''bert-base-cased''' , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
SCREAMING_SNAKE_CASE : List[Any] = json.loads(open(UpperCamelCase__ , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Optional[int] = Path(UpperCamelCase__ ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , '''a.txt''' ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , '''b.txt''' ) )
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase__ :
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]="resnet50" , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Any=True , UpperCamelCase__ : int=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE : List[Any] = stage_names
SCREAMING_SNAKE_CASE : int = out_features
SCREAMING_SNAKE_CASE : Optional[int] = backbone
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Dict = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = is_training
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
    def test_config( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence( self ):
        timm_checkpoint = '''resnet18'''
        transformers_checkpoint = '''microsoft/resnet-18'''
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
    def test_feed_forward_chunking( self ):
        pass

    @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
    def test_hidden_states_output( self ):
        pass

    @unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
    def test_initialization( self ):
        pass

    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
    def test_from_pretrained_no_checkpoint( self ):
        pass

    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def test_save_load( self ):
        pass

    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def test_tie_model_weights( self ):
        pass

    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def test_tied_model_weights_key_ignore( self ):
        pass

    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def test_load_save_without_tied_weights( self ):
        pass

    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def test_model_weights_reload_no_missing_tied_weights( self ):
        pass

    @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
    def test_channels( self ):
        pass

    @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
    def test_torchscript_output_attentions( self ):
        pass

    @unittest.skip('''Safetensors is not supported by timm.''' )
    def test_can_use_safetensors( self ):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
        pass
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
    def test_create_from_modified_config( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 258 | 1 |
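# Slow TF integration test for MT5: scores a fixed prompt/label pair with the
# google/mt5-small checkpoint and compares a score derived from the LM loss
# against a stored reference value.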
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    @slow
    def test_small_integration_test( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228_168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308 |
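# Regression test for the Marian Tatoeba converter below: it needs a local checkout of the
# Tatoeba-Challenge repository at DEFAULT_REPO, otherwise the whole class is skipped.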
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
    @cached_property
    def resolver( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
    @slow
    def test_resolver( self ):
        self.resolver.convert_models(['''heb-eng'''] )

    @slow
    def test_model_card( self ):
        content , mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 308 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    model_type = 'bert'
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
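# Minimal usage sketch (hypothetical, mirroring the standard transformers ONNX export flow):
#
#     config = BertConfig()
#     onnx_config = BertOnnxConfig(config)
#     print(onnx_config.inputs)
#     # -> OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])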
| 197 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
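# Lazy-import module: the _import_structure mapping below declares what each submodule
# exports, and the _LazyModule installed into sys.modules at the bottom defers the
# actual imports until an attribute is first accessed.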
lowerCamelCase_ : Dict = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 1 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = "x", lowerCamelCase = 1_0**-1_0, lowerCamelCase = 1, ):
__lowerCAmelCase = symbols(lowerCamelCase)
__lowerCAmelCase = lambdify(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = lambdify(lowerCamelCase, diff(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = starting_point
while True:
if diff_function(lowerCamelCase) != 0:
__lowerCAmelCase = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
lowerCamelCase)
else:
raise ZeroDivisionError('''Could not find root''') from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess) < precision:
return next_guess
__lowerCAmelCase = next_guess
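# Note on the update above: near a root of multiplicity m, the plain Newton step only
# converges linearly; the modified step x_{k+1} = x_k - m * f(x_k) / f'(x_k) restores
# quadratic convergence, which is why `multiplicity` scales the correction term.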
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_05)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 174 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174 | 1 |
def solution(n: int = 1_0_0 ) -> int:
    """Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
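# Closed forms used above: sum of squares = n(n+1)(2n+1)/6 and square of the sum = (n(n+1)/2)**2.
# Quick sanity check (the worked example from Project Euler #6): solution(1_0) == 2_640.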
if __name__ == "__main__":
print(F'''{solution() = }''')
| 358 |
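# `datasets` metric wrapper around the CoVal coreference scorer: the docstrings below
# describe the expected CoNLL line format and include a usage example.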
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identifies the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            """Number of removed nested coreferring mentions in the key """
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            """Number of resulting singleton clusters in the key """
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            """files, respectively""" )
    return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': f1} )
        logger.info(
            name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {f1 * 1_0_0:.2f}' , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_0_0
        logger.info(F'CoNLL score: {conll:.2f}' )
        output_scores.update({"""conll_score""": conll} )
    return output_scores
def check_gold_parse_annotation(key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#""" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ), codebase_urls=["""https://github.com/ns-moosavi/coval"""], reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
], )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False ):
        metrics = [
            ("""mentions""", evaluator.mentions),
            ("""muc""", evaluator.muc),
            ("""bcub""", evaluator.b_cubed),
            ("""ceafe""", evaluator.ceafe),
            ("""lea""", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
return score
| 82 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig ):
    model_type = '''perceiver'''
    def __init__(self , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-4
    def generate_dummy_inputs(self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ) -> Mapping[str, Any]:
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a'''] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''input_ids''' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''pixel_values''' )
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
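# Note: unlike the BERT OnnxConfig earlier in this collection, Perceiver exposes a single
# modality-agnostic input named "inputs", hence the pop/rename of "input_ids" /
# "pixel_values" at the end of each branch above.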
| 279 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict ):
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )

        print(f'''{key} -> {new_key}''' )

        s_dict[new_key] = s_dict.pop(key )
    return s_dict
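# Illustrative example of the remapping above (hypothetical key, not read from a real
# checkpoint): "encoder.blocks.0.mlp.0.weight" -> "encoder.layers.0.fc1.weight".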
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size , vocab_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
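# The Linear returned above shares its weight tensor with the embedding, which is how the
# output projection (proj_out) gets tied to the decoder token embeddings during conversion.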
def _download(url: str , root: str = "." ) -> bytes:
    # NOTE: the default download root is an assumption added here; the call site below
    # passes only the URL.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('''/''' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , '''rb''' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(url ) as source, open(download_target , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 279 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class MraConfig(PretrainedConfig ):
    model_type = """mra"""
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
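# Minimal usage sketch (hypothetical): instantiate the default configuration and inspect
# the MRA-specific attention-approximation settings.
#
#     configuration = MraConfig()
#     print(configuration.block_per_row, configuration.approx_mode)  # 4 "full"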
| 44 |
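# Maintenance script: rewrites expected-value lines inside test files, given a file of
# semicolon-separated `file;class;test;correct_line` entries, optionally filtered by a
# list of test failures.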
import argparse
from collections import defaultdict
def overwrite_file(file , class_name , test_name , correct_line , done_test ):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1

    with open(file , '''r''' ) as f:
        lines = f.readlines()

    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * ' '}def {test_name}("""
    line_begin_regex = f"""{8 * ' '}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * ' '}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * ' '}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )

    with open(file , '''w''' ) as f:
        for line in new_lines:
            f.write(line )
def main(correct , fail=None ):
    if fail is not None:
        with open(fail , '''r''' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct , '''r''' ) as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(''';''' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
A__ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 44 | 1 |
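# Conversion script for suno/bark checkpoints: downloads the original weights from the Hub,
# remaps layer names into the transformers Bark implementation, and verifies that the
# converted model reproduces the original outputs before saving.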
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type , use_small=False ) -> str:
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["file_name"] )
def _download(from_hf_path , file_name ):
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def _A ( snake_case , snake_case , snake_case=False , snake_case="text" ) -> Tuple:
if model_type == "text":
_lowercase : Optional[int] = BarkSemanticModel
_lowercase : int = BarkSemanticConfig
_lowercase : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
_lowercase : Tuple = BarkCoarseModel
_lowercase : List[str] = BarkCoarseConfig
_lowercase : List[Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
_lowercase : int = BarkFineModel
_lowercase : Any = BarkFineConfig
_lowercase : List[str] = BarkFineGenerationConfig
else:
raise NotImplementedError()
_lowercase : Any = F'''{model_type}_small''' if use_small else model_type
_lowercase : Optional[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(a__ ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["repo_id"] , model_info["file_name"] )
_lowercase : Dict = torch.load(a__ , map_location=a__ )
# this is a hack
_lowercase : Optional[int] = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
_lowercase : List[Any] = model_args["""vocab_size"""]
_lowercase : Dict = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_lowercase : List[str] = model_args.pop("n_head" )
_lowercase : str = model_args.pop("n_embd" )
_lowercase : int = model_args.pop("n_layer" )
_lowercase : Union[str, Any] = ConfigClass(**checkpoint["model_args"] )
_lowercase : Optional[int] = ModelClass(config=a__ )
_lowercase : int = GenerationConfigClass()
_lowercase : Optional[Any] = model_generation_config
_lowercase : Optional[Any] = checkpoint["""model"""]
# fixup checkpoint
_lowercase : Tuple = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(a__ ):
# replace part of the key with corresponding layer name in HF implementation
_lowercase : List[str] = k[len(a__ ) :]
for old_layer_name in new_layer_name_dict:
_lowercase : Tuple = new_k.replace(a__ , new_layer_name_dict[old_layer_name] )
_lowercase : Optional[Any] = state_dict.pop(a__ )
_lowercase : int = set(state_dict.keys() ) - set(model.state_dict().keys() )
_lowercase : Any = {k for k in extra_keys if not k.endswith(".attn.bias" )}
_lowercase : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() )
_lowercase : Optional[Any] = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(a__ ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(a__ ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(a__ , strict=a__ )
_lowercase : Tuple = model.num_parameters(exclude_embeddings=a__ )
_lowercase : int = checkpoint["""best_val_loss"""].item()
logger.info(F'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(a__ , 3 )} loss''' )
model.eval()
model.to(a__ )
del checkpoint, state_dict
return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semantic_config = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarse_config = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fine_config = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codec_config = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarse_acoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fine_acoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semantic_config, coarse_config, fine_config, codec_config
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarse_acoustic.generation_config, fine_acoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarse_acoustic
    bark.fine_acoustics = fine_acoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
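# Illustrative invocation (hypothetical paths; assumes this script is saved as
# convert_suno_to_hf.py — the checkpoint is fetched into CACHE_DIR on first use):
#
#   python convert_suno_to_hf.py text /tmp/bark_text --is_small
#
# `model_type` picks the sub-model (text/coarse/fine); the converted weights are
# written into `pytorch_dump_folder_path` via `save_pretrained`.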
| 250 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list *a* of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
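# Note: pigeonhole sort runs in O(n + range) time and O(range) extra space,
# where range = max(a) - min(a) + 1; e.g. sorting [5, 1, 3] in place yields
# [1, 3, 5]. It is only practical when the value range is close to n.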
| 248 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
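# Illustrative sketch of the raw faiss API that these dataset helpers wrap
# (not part of the test suite; assumes faiss-cpu is installed):
#
#   import faiss
#   import numpy as np
#   index = faiss.IndexFlatIP(5)                        # inner-product index, dim 5
#   index.add(np.eye(5, dtype=np.float32))              # index five one-hot vectors
#   scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 1)
#   # ids[0][0] is the best match; with inner product all five score 1.0 here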
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
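# The `storage_options` dict above is forwarded to fsspec, so the same
# save/load calls should also work against real remote filesystems, e.g.
# (hypothetical bucket) index.save("s3://my-bucket/index.faiss",
# storage_options={"anon": False}).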
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 364 |
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root=None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node, new_children) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node) -> int:
        arr = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
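# Quick property sketch (illustrative): an in-order traversal of a BST visits
# values in sorted order, which is what `find_kth_smallest` relies on.
#
#   t = BinarySearchTree()
#   t.insert(8, 3, 10, 1)
#   arr = []
#   t.inorder(arr, t.root)
#   assert arr == [1, 3, 8, 10]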
def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 228 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from the beginning (positive) or end (negative) of a dotted path."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map resnet keys from the original LDM checkpoint to their diffusers names."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map attention keys from the original LDM checkpoint to their diffusers names."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an original LDM UNet state dict to the diffusers naming scheme."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
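# Sketch of the attention-weight split that `assign_to_checkpoint` performs,
# simplified to ignore the per-head reshape (hypothetical shapes, for intuition):
#
#   qkv = torch.randn(3 * 64, 64)      # fused qkv projection, 64 channels
#   q, k, v = qkv.chunk(3, dim=0)      # one (64, 64) matrix per projection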
| 93 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
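# Illustrative usage (assumes a transformers release that ships Mask2Former):
#
#   config = Mask2FormerConfig()            # defaults to the small Swin backbone
#   config.num_queries, config.hidden_dim   # -> (100, 256)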
| 32 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
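# This command is typically reached through the `diffusers-cli env` entry point,
# which routes the parsed sub-command to `info_command_factory`.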
| 253 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
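# Minimal round-trip sketch of the reader/writer pair under test (illustrative,
# outside pytest):
#
#   ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = Dataset.from_parquet("out.parquet")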
| 253 | 1 |
def hamming(n_element: int) -> list:
    """Return the first *n_element* Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
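# For reference, the first ten Hamming numbers are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 (7 and 11 are skipped because they have
# prime factors other than 2, 3 and 5).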
| 51 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
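# The `_LazyModule` pattern above defers the heavy torch/TF imports until an
# attribute is actually accessed; `_import_structure` only lists the names.
# For example, `from transformers.models.deberta import DebertaModel` triggers
# the real import of `modeling_deberta` on first access.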
| 51 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build an (untied) linear layer that shares its weights with an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NB: the first operand of these `and` chains is a truthy string literal,
        # so the conditions reduce to `"experts" not in key` — kept as in the
        # original conversion script.
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
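# The emitted index follows the standard sharded-checkpoint layout, roughly:
#
#   {"metadata": {"total_size": ...},
#    "weight_map": {"shared.weight": "pytorch_model-00129-of-00129.bin", ...}}
#
# (the shard numbers above are illustrative for the 128-expert case).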
| 211 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 211 | 1 |
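A stripped-down sketch of the deferred-import idea behind `_LazyModule` above (the `LazyModule` class and the `demo` mapping are illustrative placeholders, not the real implementation): attribute access triggers the actual import, so importing the top-level package stays cheap.

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal lazy loader: defer submodule imports until attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the module that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(self._symbol_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value


# Placeholder structure mirroring the BEiT-style mapping above.
demo = LazyModule("demo", {"json": ["dumps", "loads"]})
print(demo.dumps({"ok": True}))  # json is only imported at this point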
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :List[str] , *_UpperCamelCase :int , **_UpperCamelCase :Dict )-> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :int , *_UpperCamelCase :Union[str, Any] , **_UpperCamelCase :Tuple )-> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Any , *_UpperCamelCase :Optional[int] , **_UpperCamelCase :Any )-> Optional[int]:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :List[str] , *_UpperCamelCase :Dict , **_UpperCamelCase :Tuple )-> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :int , *_UpperCamelCase :List[Any] , **_UpperCamelCase :Union[str, Any] )-> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Optional[Any] , *_UpperCamelCase :Tuple , **_UpperCamelCase :int )-> int:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :int , *_UpperCamelCase :Tuple , **_UpperCamelCase :Dict )-> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Any , *_UpperCamelCase :str , **_UpperCamelCase :Optional[Any] )-> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Any , *_UpperCamelCase :Tuple , **_UpperCamelCase :str )-> List[Any]:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :Optional[int] , *_UpperCamelCase :Union[str, Any] , **_UpperCamelCase :Any )-> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Union[str, Any] , *_UpperCamelCase :List[str] , **_UpperCamelCase :Union[str, Any] )-> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Tuple , *_UpperCamelCase :List[Any] , **_UpperCamelCase :Optional[Any] )-> str:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :Optional[Any] , *_UpperCamelCase :Optional[int] , **_UpperCamelCase :Dict )-> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Tuple , *_UpperCamelCase :Dict , **_UpperCamelCase :Tuple )-> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :int , *_UpperCamelCase :Any , **_UpperCamelCase :List[Any] )-> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :Optional[Any] , *_UpperCamelCase :Dict , **_UpperCamelCase :Union[str, Any] )-> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Optional[int] , *_UpperCamelCase :Optional[Any] , **_UpperCamelCase :Any )-> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :str , *_UpperCamelCase :str , **_UpperCamelCase :Union[str, Any] )-> str:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :str , *_UpperCamelCase :Any , **_UpperCamelCase :int )-> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Any , *_UpperCamelCase :List[Any] , **_UpperCamelCase :Union[str, Any] )-> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :List[Any] , *_UpperCamelCase :Tuple , **_UpperCamelCase :Dict )-> int:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :int , *_UpperCamelCase :Optional[int] , **_UpperCamelCase :Optional[Any] )-> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :int , *_UpperCamelCase :Optional[int] , **_UpperCamelCase :Optional[int] )-> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Tuple , *_UpperCamelCase :int , **_UpperCamelCase :Optional[Any] )-> str:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :Optional[int] , *_UpperCamelCase :int , **_UpperCamelCase :int )-> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Dict , *_UpperCamelCase :Dict , **_UpperCamelCase :Tuple )-> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Dict , *_UpperCamelCase :Any , **_UpperCamelCase :Optional[int] )-> Optional[int]:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :Optional[int] , *_UpperCamelCase :Any , **_UpperCamelCase :List[Any] )-> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Optional[Any] , *_UpperCamelCase :Union[str, Any] , **_UpperCamelCase :Tuple )-> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Optional[Any] , *_UpperCamelCase :Optional[int] , **_UpperCamelCase :Union[str, Any] )-> List[str]:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :Optional[int] , *_UpperCamelCase :Optional[Any] , **_UpperCamelCase :str )-> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Optional[Any] , *_UpperCamelCase :Any , **_UpperCamelCase :Union[str, Any] )-> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Tuple , *_UpperCamelCase :Union[str, Any] , **_UpperCamelCase :Optional[Any] )-> List[str]:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :int , *_UpperCamelCase :Optional[Any] , **_UpperCamelCase :List[Any] )-> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Dict , *_UpperCamelCase :str , **_UpperCamelCase :Optional[int] )-> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Any , *_UpperCamelCase :Union[str, Any] , **_UpperCamelCase :Any )-> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class A_ ( metaclass=_lowerCamelCase ):
lowerCAmelCase__ = ["""flax"""]
def __init__(self :Optional[Any] , *_UpperCamelCase :str , **_UpperCamelCase :Optional[Any] )-> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :Dict , *_UpperCamelCase :Dict , **_UpperCamelCase :str )-> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowerCAmelCase (cls :List[str] , *_UpperCamelCase :str , **_UpperCamelCase :Optional[int] )-> Tuple:
requires_backends(cls , ['''flax'''] )
| 117 |
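The dummy classes above all reduce to one idea; here is a hedged re-creation of it (`requires_backends_sketch` is an illustrative stand-in, not the real diffusers helper): constructing a backend-dependent object without the backend installed should fail immediately with an actionable message.

import importlib.util


def requires_backends_sketch(obj, backends):
    # Simplified stand-in for the real requires_backends helper: check each
    # optional dependency and raise an actionable ImportError if any is absent.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class NeedsFlax:
    # Mirrors the dummy classes above: instantiating this without flax fails loudly.
    def __init__(self):
        requires_backends_sketch(self, ["flax"])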
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    # One extra relaxation pass: if any edge can still be relaxed, a negative cycle exists.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Relax every edge vertex_count - 1 times, then check for negative cycles."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    if check_negative_cycle(graph, distance, edge_count):
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 117 | 1 |
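A non-interactive smoke test for the Bellman-Ford code above (the graph is chosen arbitrarily; assumes the definitions above are importable):

# Distances from vertex 0 should come out as [0.0, 4.0, 1.0, 3.0]:
# 0 -> 1 costs 4, 0 -> 1 -> 2 costs 4 + (-3) = 1, and 0 -> 1 -> 2 -> 3 costs 3.
demo_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
    {"src": 2, "dst": 3, "weight": 2},
]
print(bellman_ford(demo_graph, 4, len(demo_graph), 0))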
import argparse
import os
import re
_UpperCAmelCase = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_UpperCAmelCase = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
_UpperCAmelCase = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : bool = False ):
'''simple docstring'''
with open(__lowercase ,'r' ,encoding='utf-8' ) as f:
A_ : int = f.read()
A_ : Tuple = content.split('\n' )
A_ : Any = []
A_ : int = 0
while line_idx < len(__lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
A_ : Dict = len(re.search(r'^(\s*)\S' ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
A_ : int = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
A_ : str = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
A_ : List[Any] = sorted(__lowercase ,key=lambda __lowercase : _re_identifier.search(__lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write('\n'.join(__lowercase ) )
elif "\n".join(__lowercase ) != content:
return True
def UpperCamelCase ( __lowercase : bool = False ):
'''simple docstring'''
A_ : Dict = [os.path.join(__lowercase ,__lowercase ) for f in os.listdir(__lowercase ) if f.endswith('.py' )]
A_ : Any = [sort_auto_mapping(__lowercase ,overwrite=__lowercase ) for fname in fnames]
if not overwrite and any(__lowercase ):
A_ : Dict = [f for f, d in zip(__lowercase ,__lowercase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {', '.join(__lowercase )}. Run `make style` to fix'''
' this.' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_UpperCAmelCase = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
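A toy demonstration of the sort the script above performs (the block lines and mapping entries are made up): entries are ordered by the quoted model-type identifier that the identifier regex extracts.

import re

_re_id = re.compile(r'\s*\(\s*"(\S[^"]+)"')  # same shape as the identifier pattern above
toy_block = [
    '        ("vit", "ViTConfig"),',
    '        ("beit", "BeitConfig"),',
    '        ("albert", "AlbertConfig"),',
]
# Prints the albert, beit, vit lines in that order.
print("\n".join(sorted(toy_block, key=lambda line: _re_id.search(line).groups()[0])))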
| 367 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : int = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Optional[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.dummy_uncond_unet
A_ : List[Any] = DDIMScheduler()
A_ : Any = self.dummy_vq_model
A_ : int = LDMPipeline(unet=lowercase , vqvae=lowercase , scheduler=lowercase )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
A_ : Any = torch.manual_seed(0 )
A_ : Dict = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' ).images
A_ : Any = torch.manual_seed(0 )
A_ : List[str] = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase )[0]
A_ : Union[str, Any] = image[0, -3:, -3:, -1]
A_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
A_ : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
A_ : str = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
A_ : Any = torch.manual_seed(0 )
A_ : List[str] = ldm(generator=lowercase , num_inference_steps=5 , output_type='numpy' ).images
A_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
A_ : Tuple = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
A_ : Dict = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 192 | 0 |
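The assertions above follow a standard diffusers test idiom; a minimal sketch of it with fabricated arrays: pin a 3x3 corner of the last channel against hard-coded reference values within a tolerance.

import numpy as np

image = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)  # stand-in for pipeline output
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.full(9, 0.5, dtype=np.float32)  # would be pinned regression values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2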
"""simple docstring"""
import socket
def lowerCamelCase__ ( ) -> Optional[int]:
lowerCamelCase_ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
lowerCamelCase_ = socket.gethostname()
lowerCamelCase_ = 12312
sock.connect((host, port) )
sock.send(b'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
lowerCamelCase_ = sock.recv(1024 )
if not data:
break
out_file.write(_lowerCamelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
| 183 |
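A hedged sketch of the sending side the client above expects; the port and chunk size mirror the client, while the file name "Send_file" and the single-connection behavior are assumptions.

import socket


def serve_file(filename="Send_file", port=12312):
    # Minimal counterpart to the client above: accept one connection,
    # read its greeting, stream the file, then close to signal EOF.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connected to {addr}")
    print(conn.recv(1024).decode())  # "Hello server!"
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()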
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : Optional[int] = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
__lowerCAmelCase : Dict = {'bert_for_seq_generation': 512}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<::::>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[str] , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , sep_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Dict ) -> Dict:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : Optional[Any] , __lowerCamelCase : Dict ) -> Optional[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] ) -> int:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ) -> Any:
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict ) -> Any:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 107 | 0 |
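A round-trip sketch of the raw SentencePiece calls the tokenizer above wraps; "spiece.model" is a placeholder path to any trained SentencePiece model file.

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # placeholder model path
pieces = sp.encode("This is a test", out_type=str)  # e.g. ['▁This', '▁is', '▁a', ...]
print(pieces)
print(sp.decode(pieces))  # back to "This is a test"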
def triangle_number_generator():
    """Yield triangle numbers t_n = n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
| 361 |
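Two spot checks for the solution above (assuming the definitions above are in scope): 28 = 2^2 * 7 has (2 + 1)(1 + 1) = 6 divisors, and it is the first triangle number with more than five divisors, matching the worked example in the Project Euler problem statement.

assert count_divisors(28) == 6
assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28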
class Node:
    """A named node with a comparable value."""

    def __init__(self, name: str, val: int) -> None:
        self.name = name
        self.val = val

    def __str__(self) -> str:
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other: "Node") -> bool:
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap over Nodes with O(log n) decrease_key."""

    def __init__(self, array: list) -> None:
        self.idx_of_element: dict = {}
        self.heap_dict: dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key: str) -> int:
        return self.get_value(key)

    def get_parent_idx(self, idx: int) -> int:
        return (idx - 1) // 2

    def get_left_child_idx(self, idx: int) -> int:
        return idx * 2 + 1

    def get_right_child_idx(self, idx: int) -> int:
        return idx * 2 + 2

    def get_value(self, key: str) -> int:
        return self.heap_dict[key]

    def build_heap(self, array: list) -> list:
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        # Heapify from the last internal node down to the root.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx: int, array: list) -> None:
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx: int) -> None:
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self) -> Node:
        return self.heap[0]

    def remove(self) -> Node:
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node: Node) -> None:
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self) -> bool:
        return len(self.heap) == 0

    def decrease_key(self, node: Node, new_value: int) -> None:
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 191 | 0 |
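Popping the minimum repeatedly turns the structure above into a heap sort; a small demo (node values are arbitrary, and Node and MinHeap are assumed to be the definitions above):

nodes = [Node(name, val) for name, val in [("a", 5), ("b", 1), ("c", 3)]]
demo_heap = MinHeap(nodes)
while not demo_heap.is_empty():
    print(demo_heap.remove())  # Node(b, 1), then Node(c, 3), then Node(a, 5)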
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] =["pixel_values"]
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> str:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = size if size is not None else {"shortest_edge": 384}
UpperCamelCase :Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = do_resize
UpperCamelCase :Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
UpperCamelCase :str = crop_pct if crop_pct is not None else 224 / 256
UpperCamelCase :Tuple = resample
UpperCamelCase :Union[str, Any] = do_rescale
UpperCamelCase :List[str] = rescale_factor
UpperCamelCase :Tuple = do_normalize
UpperCamelCase :Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase :Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> int:
UpperCamelCase :Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
UpperCamelCase :Optional[Any] = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCamelCase :Dict = int(shortest_edge / crop_pct )
UpperCamelCase :Optional[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE_ , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE_ , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> int:
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
UpperCamelCase :Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase :str = crop_pct if crop_pct is not None else self.crop_pct
UpperCamelCase :Dict = resample if resample is not None else self.resample
UpperCamelCase :List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase :Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase :str = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase :Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase :Dict = image_std if image_std is not None else self.image_std
UpperCamelCase :Tuple = size if size is not None else self.size
UpperCamelCase :List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase :List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCamelCase :List[Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase :Any = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCamelCase :Any = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase :Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase :Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
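A worked example of the crop_pct branch above: with shortest_edge = 224 and the default crop_pct = 224 / 256, the image is first resized so its short side is int(224 / (224 / 256)) = 256, then center-cropped back to 224.

shortest_edge = 224
crop_pct = 224 / 256
resize_shortest_edge = int(shortest_edge / crop_pct)
print(resize_shortest_edge)  # 256 -> resize short side to 256, then take a 224x224 center crop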
| 259 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[int] , A : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
_UpperCAmelCase : Optional[int] = nn.ModuleList(A )
def _A ( self : Dict , A : torch.FloatTensor , A : Union[torch.Tensor, float, int] , A : torch.Tensor , A : List[torch.tensor] , A : List[float] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[Dict[str, Any]] = None , A : bool = False , A : bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(A , A , self.nets ) ):
_UpperCAmelCase , _UpperCAmelCase : str = controlnet(
A , A , A , A , A , A , A , A , A , A , A , )
# merge samples
if i == 0:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = down_samples, mid_sample
else:
_UpperCAmelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A , A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _A ( self : List[str] , A : Union[str, os.PathLike] , A : bool = True , A : Callable = None , A : bool = False , A : Optional[str] = None , ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A , is_main_process=A , save_function=A , safe_serialization=A , variant=A , )
idx += 1
_UpperCAmelCase : Tuple = model_path_to_save + F"""_{idx}"""
@classmethod
def _A ( cls : int , A : Optional[Union[str, os.PathLike]] , **A : Tuple ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_UpperCAmelCase : int = pretrained_model_path
while os.path.isdir(A ):
_UpperCAmelCase : List[str] = ControlNetModel.from_pretrained(A , **A )
controlnets.append(A )
idx += 1
_UpperCAmelCase : Dict = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(A )} controlnets loaded from {pretrained_model_path}.""" )
if len(A ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(A )
| 31 | 0 |
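A minimal sketch of the residual merge in the forward method above (the tensors are random stand-ins for down-block and mid-block residuals): the first ControlNet seeds the accumulators, and every later one is summed element-wise into them.

import torch

per_net_down = [[torch.randn(2, 4) for _ in range(3)] for _ in range(2)]
per_net_mid = [torch.randn(2, 4) for _ in range(2)]

down_samples, mid_sample = per_net_down[0], per_net_mid[0]
for downs, mid in zip(per_net_down[1:], per_net_mid[1:]):
    down_samples = [prev + curr for prev, curr in zip(down_samples, downs)]
    mid_sample = mid_sample + mid
print(len(down_samples), mid_sample.shape)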
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """linear"""
lowerCAmelCase_ = """cosine"""
lowerCAmelCase_ = """cosine_with_restarts"""
lowerCAmelCase_ = """polynomial"""
lowerCAmelCase_ = """constant"""
lowerCAmelCase_ = """constant_with_warmup"""
lowerCAmelCase_ = """piecewise_constant"""
def A_( A : Optimizer , A : int = -1):
return LambdaLR(A , lambda A: 1 , last_epoch=A)
def A_( A : Optimizer , A : int , A : int = -1):
def lr_lambda(A : int):
if current_step < num_warmup_steps:
return float(A) / float(max(1.0 , A))
return 1.0
return LambdaLR(A , A , last_epoch=A)
def A_( A : Optimizer , A : str , A : int = -1):
UpperCamelCase = {}
UpperCamelCase = step_rules.split(',')
for rule_str in rule_list[:-1]:
UpperCamelCase , UpperCamelCase = rule_str.split(':')
UpperCamelCase = int(A)
UpperCamelCase = float(A)
UpperCamelCase = value
UpperCamelCase = float(rule_list[-1])
def create_rules_function(A : Any , A : Optional[int]):
def rule_func(A : int) -> float:
UpperCamelCase = sorted(rules_dict.keys())
for i, sorted_step in enumerate(A):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCamelCase = create_rules_function(A , A)
return LambdaLR(A , A , last_epoch=A)
def A_( A : Tuple , A : List[Any] , A : Any , A : int=-1):
def lr_lambda(A : int):
if current_step < num_warmup_steps:
return float(A) / float(max(1 , A))
return max(
0.0 , float(num_training_steps - current_step) / float(max(1 , num_training_steps - num_warmup_steps)))
return LambdaLR(A , A , A)
def A_( A : Optimizer , A : int , A : int , A : float = 0.5 , A : int = -1):
def lr_lambda(A : List[str]):
if current_step < num_warmup_steps:
return float(A) / float(max(1 , A))
UpperCamelCase = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps))
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A) * 2.0 * progress)))
return LambdaLR(A , A , A)
def A_( A : Optimizer , A : int , A : int , A : int = 1 , A : int = -1):
def lr_lambda(A : List[Any]):
if current_step < num_warmup_steps:
return float(A) / float(max(1 , A))
UpperCamelCase = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A) * progress) % 1.0))))
return LambdaLR(A , A , A)
def A_( A : Any , A : int , A : Union[str, Any] , A : Dict=1E-7 , A : Optional[Any]=1.0 , A : List[Any]=-1):
UpperCamelCase = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''')
def lr_lambda(A : int):
if current_step < num_warmup_steps:
return float(A) / float(max(1 , A))
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCamelCase = lr_init - lr_end
UpperCamelCase = num_training_steps - num_warmup_steps
UpperCamelCase = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCamelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A , A , A)
lowerCAmelCase : Dict = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def A_( A : Union[str, SchedulerType] , A : Optimizer , A : Optional[str] = None , A : Optional[int] = None , A : Optional[int] = None , A : int = 1 , A : float = 1.0 , A : int = -1 , ):
UpperCamelCase = SchedulerType(A)
UpperCamelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A , last_epoch=A)
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A , step_rules=A , last_epoch=A)
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''')
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A , num_warmup_steps=A , last_epoch=A)
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''')
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A , num_warmup_steps=A , num_training_steps=A , num_cycles=A , last_epoch=A , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A , num_warmup_steps=A , num_training_steps=A , power=A , last_epoch=A , )
return schedule_func(
A , num_warmup_steps=A , num_training_steps=A , last_epoch=A)
| 251 |
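A few multiplier values from the linear warmup-decay rule above, recomputed inline (10 warmup steps and 50 training steps are chosen arbitrarily): the factor ramps 0 -> 1 during warmup, then decays linearly back to 0.

num_warmup_steps, num_training_steps = 10, 50
for step in (0, 5, 10, 30, 50):
    if step < num_warmup_steps:
        factor = step / max(1, num_warmup_steps)
    else:
        factor = max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))
    print(step, round(factor, 3))  # 0 0.0, 5 0.5, 10 1.0, 30 0.5, 50 0.0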
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A_( A : List[Any]):
UpperCamelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(A , A)
def A_( A : Any):
UpperCamelCase = list(s_dict.keys())
for key in keys:
if "transformer_layers" in key:
UpperCamelCase = s_dict.pop(A)
elif "subsample" in key:
UpperCamelCase = s_dict.pop(A)
def A_( A : Optional[int]):
UpperCamelCase , UpperCamelCase = emb.weight.shape
UpperCamelCase = nn.Linear(A , A , bias=A)
UpperCamelCase = emb.weight.data
return lin_layer
def A_( A : Optional[int] , A : List[str]):
UpperCamelCase = torch.load(A , map_location='cpu')
UpperCamelCase = mam_aaa['args']
UpperCamelCase = mam_aaa['model']
UpperCamelCase = state_dict['decoder.output_projection.weight']
remove_ignore_keys_(A)
rename_keys(A)
UpperCamelCase = state_dict['decoder.embed_tokens.weight'].shape[0]
UpperCamelCase = args.share_decoder_input_output_embed
UpperCamelCase = [int(A) for i in args.conv_kernel_sizes.split(',')]
UpperCamelCase = SpeechaTextConfig(
vocab_size=A , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(A) , conv_channels=args.conv_channels , conv_kernel_sizes=A , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=A , num_beams=5 , max_length=200 , use_cache=A , decoder_start_token_id=2 , early_stopping=A , )
UpperCamelCase = SpeechaTextForConditionalGeneration(A)
UpperCamelCase , UpperCamelCase = model.model.load_state_dict(A , strict=A)
if len(A) > 0 and not set(A) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
f''' but all the following weights are missing {missing}''')
if tie_embeds:
UpperCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens)
else:
UpperCamelCase = lm_head_weights
model.save_pretrained(A)
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase : List[str] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 251 | 1 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = XLMProphetNetTokenizer
__lowercase = False
__lowercase = True
def lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '[PAD]'
_snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(lowerCAmelCase_ ) , 10_12 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
_snake_case = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_snake_case = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
_snake_case = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def lowerCamelCase ( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'Hello World!'
_snake_case = [3_53_89, 66_72, 49, 2]
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = {'input_ids': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 42 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Return x unchanged if it is already iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class __lowerCAmelCase :
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model}
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = after_output[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1e-5 )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(_snake_case , _snake_case , F'Difference between torch and flax is {diff} (>= {tol}).' )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case )
    @slow
    def test_real_model_save_load_from_pretrained(self):
        """simple docstring"""
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(lowerCamelCase__, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        """simple docstring"""
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(lowerCamelCase__, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """simple docstring"""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        """simple docstring"""
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(lowerCamelCase__, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        """simple docstring"""
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.228_4727, 0.310_4122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 82 | 0 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition around a[left_index]; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
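
# Illustrative self-check added for clarity (not part of the original script):
# whatever pivots the RNG picks, the result must equal the sorted input.
def _quick_sort_self_test() -> None:
    data = [7, 3, 9, 1, 4, 4, 0]
    expected = sorted(data)
    quick_sort_random(data, 0, len(data))
    assert data == expected, data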
| 359 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
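
# Hedged usage sketch (illustrative; assumes a Chinese-CLIP checkpoint such as
# "OFA-Sys/chinese-clip-vit-base-patch16" and a PIL image bound to `image`):
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   batch = processor(text=["一张猫的照片"], images=image, return_tensors="pt")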
| 205 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
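
# Example invocation (illustrative; the paths are placeholders):
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task t2s \
#       --checkpoint_path /path/to/speecht5_tts.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts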
| 55 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
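
# Worked example (illustrative): with power=2 the search is over sums of
# distinct squares, so solve(13, 2) == 1 (13 = 4 + 9) and
# solve(100, 2) == 3 (100 = 100 = 36 + 64 = 1 + 9 + 16 + 25 + 49).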
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "efficientformer"
    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
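
# Minimal usage sketch (illustrative; the values mirror the defaults above):
#
#   config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
#   config.save_pretrained("./efficientformer-l1")  # writes config.json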
| 218 |
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
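
# Illustrative quick check (mirrors the docstring example above):
#
#   metric = datasets.load_metric("squad")
#   result = metric.compute(
#       predictions=[{"id": "1", "prediction_text": "1976"}],
#       references=[{"id": "1", "answers": {"text": ["1976"], "answer_start": [97]}}],
#   )
#   assert result == {"exact_match": 100.0, "f1": 100.0}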
| 218 | 1 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
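
# Cross-check against the standard library (illustrative addition, not in the
# original file): the backtracking order matches itertools.combinations.
def _cross_check() -> None:
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]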
| 201 |
import random
def _partition(data: list, pivot) -> tuple:
    """Split `data` into elements less than, equal to, and greater than `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
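
# Illustrative usage (not in the original file): quick_select returns the
# element that would occupy `index` in sorted order, so len(items) // 2
# yields a median for odd-length lists.
if __name__ == "__main__":
    print(quick_select([7, 1, 5, 3, 9], 2))  # 5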
| 201 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        """simple docstring"""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        """simple docstring"""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        """simple docstring"""
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5_122))

    @require_torch
    def test_special_tokens(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass

    def test_embeded_special_tokens(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
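
# To run only this suite from a transformers checkout (illustrative command;
# the path follows the repository layout assumed by these imports):
#   python -m pytest tests/models/led/test_tokenization_led.py -q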
| 352 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
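
# Small self-check of the helpers above (illustrative; 0b1010 == 10):
def _demo() -> None:
    assert set_bit(0b1010, 0) == 0b1011
    assert clear_bit(0b1010, 3) == 0b0010
    assert flip_bit(0b1010, 1) == 0b1000
    assert is_bit_set(0b1010, 3) is True
    assert get_bit(0b1010, 1) == 1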
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler):
        '''simple docstring'''
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        '''simple docstring'''
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.'
            )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 97 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
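
# Example invocation (illustrative prompt and paths):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200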
| 321 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
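
# Illustrative effect of the lazy module (not part of this file): attribute
# access triggers the heavy backend import, so the package stays cheap to load.
#
#   from transformers.models import xlm
#   model_cls = xlm.XLMModel  # the torch-backed module is imported here, on first access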
| 369 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    mode = "token-classification"
    def __init__(self, hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        """simple docstring"""
        return self.model(**inputs)
    def training_step(self, batch, batch_num):
        """simple docstring"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        """simple docstring"""
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(input_ids, attention_mask, token_type_ids, label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        """Evaluation called for both val and test."""
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
        preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(out_label_list , preds_list ),
            """precision""": precision_score(out_label_list , preds_list ),
            """recall""": recall_score(out_label_list , preds_list ),
            """f1""": f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret["""log"""] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ):
        """simple docstring"""
        # when stable
        ret, preds, targets = self._eval_end(outputs )
        logs = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        """simple docstring"""
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        """simple docstring"""
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            """--task_type""" , default="""NER""" , type=str , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
        parser.add_argument(
            """--max_seq_length""" ,
            default=128 ,
            type=int ,
            help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--labels""" ,
            default="""""" ,
            type=str ,
            help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
        parser.add_argument(
            """--gpus""" ,
            default=0 ,
            type=int ,
            help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
        return parser
if __name__ == "__main__":
_A = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_A = NERTransformer.add_model_specific_args(parser, os.getcwd())
_A = parser.parse_args()
_A = NERTransformer(args)
_A = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_A = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
_A = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
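# A minimal invocation sketch for this script. The paths, the script filename
# and the generic flags (--model_name_or_path, --output_dir, --do_train,
# --do_predict) are assumptions inherited from the shared argument parsers,
# not confirmed by this file:
#
#   python run_ner.py \
#       --data_dir ./data/conll2003 \
#       --labels ./data/labels.txt \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ner-output \
#       --max_seq_length 128 \
#       --do_train --do_predict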
| 212 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
A__ = """\
"""
A__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
A__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute( self , input_texts , model_id , batch_size = 16 , add_start_token = True , device=None ):
        """simple docstring"""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, gpu or cuda."
            if device == "gpu":
                device = """cuda"""
        else:
            device = """cuda""" if torch.cuda.is_available() else """cpu"""
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="""pt""" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["""input_ids"""]
        attn_masks = encodings["""attention_mask"""]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="""none""" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 82 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
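# Illustrative note (not part of this file): the _LazyModule registration above
# keeps importing the package cheap; each submodule is imported only on first
# attribute access, e.g.:
#
#   from transformers.models import convnext   # no torch/TF import happens yet
#   convnext.ConvNextConfig                    # this access triggers the real import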
| 249 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*args , **kwargs )
def a ( self : List[Any] , _lowercase : "Image" , _lowercase : str ):
__UpperCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
__UpperCAmelCase = task_prompt.replace('''{user_input}''' , _lowercase )
__UpperCAmelCase = self.pre_processor.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_tensors='''pt''' ).input_ids
__UpperCAmelCase = self.pre_processor(_lowercase , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) ,
            decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) ,
            max_length=self.model.decoder.config.max_position_embeddings ,
            early_stopping=True ,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id ,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id ,
            use_cache=True ,
            num_beams=1 ,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,
            return_dict_in_generate=True ,
        ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        sequence = re.sub(r'''<.*?>''' , '''''' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 86 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_80_15, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_55_06, '''token_str''': ''' accuser'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 3_80_15,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 2_55_06,
'''token_str''': ''' accuser''',
},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_small_model_pt( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_56_76, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
] , )
__UpperCAmelCase = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
    def test_fp16_casting( self ):
__UpperCAmelCase = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
__UpperCAmelCase = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_lowercase , _lowercase )
@slow
@require_torch
    def test_large_model_pt( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(_lowercase )
@slow
@require_tf
    def test_large_model_tf( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(_lowercase )
    def run_large_test( self , unmasker ):
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_10, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 15_73, '''token_str''': ''' Chris'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 22_01,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 1_27_90,
'''token_str''': ''' Lyon''',
},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_model_no_pad_pt( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
@require_tf
    def test_model_no_pad_tf( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            F'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ):
__UpperCAmelCase = fill_masker.tokenizer
__UpperCAmelCase = fill_masker.model
__UpperCAmelCase = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_lowercase , [
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
] , )
with self.assertRaises(_lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowercase ):
fill_masker('''This is''' )
self.run_test_top_k(_lowercase , _lowercase )
self.run_test_targets(_lowercase , _lowercase )
self.run_test_top_k_targets(_lowercase , _lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowercase , _lowercase )
self.fill_mask_with_multiple_masks(_lowercase , _lowercase )
    def run_test_targets( self , model , tokenizer ):
__UpperCAmelCase = tokenizer.get_vocab()
__UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , targets=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _lowercase )
__UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowercase ) )
# Call argument
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _lowercase )
__UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowercase ) )
# Score equivalence
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
__UpperCAmelCase = [top_mask['''token_str'''] for top_mask in outputs]
__UpperCAmelCase = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ) == set(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
__UpperCAmelCase = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
# Raises with invalid
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='''''' )
    def run_test_top_k( self , model , tokenizer ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , top_k=2 )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
    def run_test_top_k_targets( self , model , tokenizer ):
__UpperCAmelCase = tokenizer.get_vocab()
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
# top_k=2, ntargets=3
__UpperCAmelCase = sorted(vocab.keys() )[:3]
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_lowercase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        __UpperCAmelCase = [el['''token_str'''] for el in sorted(_lowercase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ).issubset(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
__UpperCAmelCase = sorted(vocab.keys() )[:3]
__UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__UpperCAmelCase = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_lowercase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_lowercase ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
] , )
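# Minimal standalone sketch of the pipeline these tests exercise; the model
# checkpoint chosen here is an assumption, not taken from the tests above:
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#   print(unmasker("Paris is the <mask> of France."))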
| 86 | 1 |
"""Collect the e-mail addresses found on a website."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor a bare "#", keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Get the main domain name (example.com)."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Get the sub-domain name (sub.example.com)."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
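# Note: the pattern above only matches "<alphanumerics>@<domain>" and misses
# dots, dashes and plus signs in the local part. A stricter (still simplified)
# variant could look like this -- a sketch, not a full RFC 5322 matcher:
#
#   EMAIL_RE = re.compile(r"[a-zA-Z0-9._%+-]+@" + re.escape(domain))
#   emails = EMAIL_RE.findall(read.text)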
| 41 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix of even size into four equal quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's
    seven-product scheme."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same
    # and also power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
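# Quick self-check sketch: Strassen must agree with the schoolbook O(n^3)
# product. Note that `strassen` pads its inputs in place, so compare against
# copies taken beforehand:
#
#   import copy
#
#   def naive_multiply(a, b):
#       return [
#           [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
#           for i in range(len(a))
#       ]
#
#   m1, m2 = copy.deepcopy(matrix1), copy.deepcopy(matrix2)
#   assert strassen(m1, m2) == naive_multiply(matrix1, matrix2)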
| 136 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( A_ , A_ )-> list[str]:
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
a : Optional[Any] = number_of_bytes // partitions
a : int = []
for i in range(snake_case_ ):
a : Optional[Any] = i * bytes_per_partition + 1
a : Optional[Any] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
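# Worked example: 100 bytes over 3 partitions gives floor(100 / 3) = 33 bytes
# per worker, with the final partition absorbing the remainder:
#
#   allocation_num(100, 3)  ->  ['1-33', '34-66', '67-100']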
| 368 |
"""simple docstring"""
import sys
import turtle
def lowercase ( A_ , A_ )-> tuple[float, float]:
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowercase ( A_ , A_ , A_ , A_ , )-> None:
'''simple docstring'''
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(A_ , get_mid(A_ , A_ ) , get_mid(A_ , A_ ) , depth - 1 )
triangle(A_ , get_mid(A_ , A_ ) , get_mid(A_ , A_ ) , depth - 1 )
triangle(A_ , get_mid(A_ , A_ ) , get_mid(A_ , A_ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
__lowercase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
__lowercase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
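# Illustrative note: each recursion level replaces a triangle with three
# half-size corner copies, so a run at depth d traces 3**d smallest triangles
# (depth 0 -> 1, depth 3 -> 27, depth 5 -> 243).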
| 226 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class a :
def __init__( self : Union[str, Any] ):
_UpperCAmelCase = {}
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = {}
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ):
if nodea not in self.connections:
self.add_node(__lowerCAmelCase )
if nodea not in self.connections:
self.add_node(__lowerCAmelCase )
_UpperCAmelCase = probability
def lowerCAmelCase_ ( self : Optional[Any] ):
return list(self.connections )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = 0
_UpperCAmelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = Counter(graph.get_nodes() )
_UpperCAmelCase = start
for _ in range(lowercase ):
_UpperCAmelCase = graph.transition(lowercase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
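# Usage sketch with a three-state chain (the probabilities are illustrative):
#
#   transitions = [
#       ("a", "a", 0.9), ("a", "b", 0.075), ("a", "c", 0.025),
#       ("b", "a", 0.15), ("b", "b", 0.8), ("b", "c", 0.05),
#       ("c", "a", 0.25), ("c", "b", 0.25), ("c", "c", 0.5),
#   ]
#   visits = get_transitions("a", transitions, 5000)
#   # the strong self-loop on "a" dominates:
#   # expect visits["a"] > visits["b"] > visits["c"]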
| 289 | """simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 ,
                eos_token_id=2 ,
                hidden_size=embedder_hidden_size ,
                projection_dim=embedder_projection_dim ,
                intermediate_size=37 ,
                layer_norm_eps=1e-0_5 ,
                num_attention_heads=4 ,
                num_hidden_layers=5 ,
                pad_token_id=1 ,
                vocab_size=1000 ,
            ) )

        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 ,
            attention_head_dim=12 ,
            embedding_dim=embedder_projection_dim ,
            num_layers=1 ,
        )

        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="""fixed_small_log""" ,
            prediction_type="""sample""" ,
            num_train_timesteps=1000 ,
            clip_sample=True ,
            clip_sample_range=5.0 ,
            beta_schedule="""squaredcos_cap_v2""" ,
        )

        # regular denoising components

        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )

        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 ,
                eos_token_id=2 ,
                hidden_size=embedder_hidden_size ,
                projection_dim=32 ,
                intermediate_size=37 ,
                layer_norm_eps=1e-0_5 ,
                num_attention_heads=4 ,
                num_hidden_layers=5 ,
                pad_token_id=1 ,
                vocab_size=1000 ,
            ) )

        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") ,
            up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") ,
            block_out_channels=(32, 64) ,
            attention_head_dim=(2, 4) ,
            class_embed_type="""projection""" ,
            # the image embeddings are concatenated with their noised counterparts,
            # hence twice the projection dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,
            cross_attention_dim=embedder_hidden_size ,
            layers_per_block=1 ,
            upcast_attention=True ,
            use_linear_projection=True ,
        )

        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="""scaled_linear""" ,
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            prediction_type="""v_prediction""" ,
            set_alpha_to_one=False ,
            steps_offset=1 ,
        )

        torch.manual_seed(0 )
        vae = AutoencoderKL()

        components = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """prior_num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device == """cpu"""
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
        pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe("""anime turle""" , generator=generator , output_type="""np""" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading( self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 289 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
torch.manual_seed(0 )
lowercase :List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowercase :Any = DDIMScheduler()
torch.manual_seed(0 )
lowercase :Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase :Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowercase :Any = CLIPTextModel(_lowerCAmelCase )
lowercase :str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase :Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        generator = torch.manual_seed(seed )
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case( self ):
lowercase :List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :int = self.get_dummy_components()
lowercase :int = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :Tuple = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :List[str] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :List[Any] = sd_pipe(**_lowerCAmelCase ).images
lowercase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :List[str] = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
    def test_stable_diffusion_panorama_negative_prompt( self ):
lowercase :Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :List[str] = self.get_dummy_components()
lowercase :Optional[Any] = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :Any = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Optional[int] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :List[Any] = "french fries"
lowercase :int = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase :int = output.images
lowercase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Optional[Any] = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch( self ):
lowercase :Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :int = self.get_dummy_components()
lowercase :List[str] = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :int = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Dict = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :Any = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase :Union[str, Any] = output.images
lowercase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Optional[int] = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :int = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :List[Any] = self.get_dummy_components()
lowercase :Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
lowercase :Tuple = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :Any = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Optional[Any] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :List[Any] = sd_pipe(**_lowerCAmelCase ).images
lowercase :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Optional[Any] = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :List[Any] = self.get_dummy_components()
lowercase :int = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , skip_prk_steps=_lowerCAmelCase )
lowercase :List[str] = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :int = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Union[str, Any] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :Any = sd_pipe(**_lowerCAmelCase ).images
lowercase :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Tuple = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self: str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Union[str, Any]=0 ):
lowercase :Any = torch.manual_seed(_lowerCAmelCase )
lowercase :Optional[int] = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :Dict = "stabilityai/stable-diffusion-2-base"
lowercase :Optional[Any] = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
lowercase :List[str] = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase :Any = self.get_inputs()
lowercase :Optional[int] = pipe(**_lowerCAmelCase ).images
lowercase :int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowercase :Tuple = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Union[str, Any] = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=_lowerCAmelCase )
lowercase :int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase :int = self.get_inputs()
lowercase :Optional[int] = pipe(**_lowerCAmelCase ).images
lowercase :List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowercase :Union[str, Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Dict = 0
def callback_fn(_lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: torch.FloatTensor ) -> None:
lowercase :Any = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase :Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowercase :Optional[int] = latents[0, -3:, -3:, -1]
lowercase :Any = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase :str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowercase :Optional[int] = latents[0, -3:, -3:, -1]
lowercase :Optional[Any] = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase :int = False
lowercase :Tuple = "stabilityai/stable-diffusion-2-base"
lowercase :Optional[Any] = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
lowercase :Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase :Optional[int] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase :int = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE ( self: str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase :Optional[Any] = "stabilityai/stable-diffusion-2-base"
lowercase :Dict = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
lowercase :Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase :Union[str, Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase :Optional[int] = self.get_inputs()
lowercase :Union[str, Any] = pipe(**_lowerCAmelCase )
lowercase :List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 158 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase__ ( lowerCamelCase ):
if is_torch_version("<", "2.0.0" ) or not hasattr(lowerCamelCase, "_dynamo" ):
return False
return isinstance(lowerCamelCase, torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase = True ):
lowercase :Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase :str = is_compiled_module(lowerCamelCase )
if is_compiled:
lowercase :str = model
lowercase :str = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowerCamelCase, lowerCamelCase ):
lowercase :Any = model.module
if not keep_fpaa_wrapper:
lowercase :List[Any] = getattr(lowerCamelCase, "forward" )
lowercase :Union[str, Any] = model.__dict__.pop("_original_forward", lowerCamelCase )
if original_forward is not None:
while hasattr(lowerCamelCase, "__wrapped__" ):
lowercase :Tuple = forward.__wrapped__
if forward == original_forward:
break
lowercase :Tuple = forward
if getattr(lowerCamelCase, "_converted_to_transformer_engine", lowerCamelCase ):
convert_model(lowerCamelCase, to_transformer_engine=lowerCamelCase )
if is_compiled:
lowercase :List[Any] = model
lowercase :Optional[int] = compiled_model
return model
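# Hedged usage sketch -- in accelerate this helper is exported as
# extract_model_from_parallel; the names below are assumptions since the dump
# obfuscates them:
# ddp = torch.nn.parallel.DistributedDataParallel(my_model)
# plain = extract_model_from_parallel(ddp)         # unwraps DDP / DeepSpeed / torch.compile
# plain = extract_model_from_parallel(ddp, False)  # also restores the pre-mixed-precision forward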
def UpperCAmelCase__ ( ):
PartialState().wait_for_everyone()
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowerCamelCase, lowerCamelCase )
elif PartialState().local_process_index == 0:
torch.save(lowerCamelCase, lowerCamelCase )
@contextmanager
def UpperCAmelCase__ ( **lowerCamelCase ):
for key, value in lowerCamelCase.items():
os.environ[key.upper()] = str(value )
yield
for key in lowerCamelCase:
if key.upper() in os.environ:
del os.environ[key.upper()]
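# Hedged usage sketch (in accelerate this context manager is patch_environment):
# with patch_environment(master_port="29500"):
#     assert os.environ["MASTER_PORT"] == "29500"
# # the key is removed from os.environ again on exit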
def UpperCAmelCase__ ( lowerCamelCase ):
if not hasattr(lowerCamelCase, "__qualname__" ) and not hasattr(lowerCamelCase, "__name__" ):
lowercase :Optional[int] = getattr(lowerCamelCase, "__class__", lowerCamelCase )
if hasattr(lowerCamelCase, "__qualname__" ):
return obj.__qualname__
if hasattr(lowerCamelCase, "__name__" ):
return obj.__name__
return str(lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
for key, value in source.items():
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase :Tuple = destination.setdefault(lowerCamelCase, {} )
merge_dicts(lowerCamelCase, lowerCamelCase )
else:
lowercase :Optional[Any] = value
return destination
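# Hedged worked example of the recursive merge above (the recursive call uses
# the readable name merge_dicts, so that is assumed to be the real name):
# merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
# -> {"a": {"y": 2, "x": 1}, "b": 3}   # nested dicts merge, scalars overwrite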
def UpperCAmelCase__ ( lowerCamelCase = None ):
if port is None:
lowercase :Tuple = 29500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
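# Hedged usage sketch (in accelerate this is is_port_in_use; 29500 is the
# default torch.distributed rendezvous port):
# if is_port_in_use(29500):
#     ...  # choose a different --main_process_port before launching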
| 158 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> list:
if len(lowerCamelCase_ ) < 2:
return collection
def circle_sort_util(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
_lowercase : Any = False
if low == high:
return swapped
_lowercase : Union[str, Any] = low
_lowercase : List[Any] = high
while left < right:
if collection[left] > collection[right]:
_lowercase , _lowercase : Optional[int] = (
collection[right],
collection[left],
)
_lowercase : int = True
left += 1
right -= 1
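# odd-sized window: the middle element still has to be compared with its
# right neighbour below, otherwise one pair would be skipped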
if left == right and collection[left] > collection[right + 1]:
_lowercase , _lowercase : Optional[int] = (
collection[right + 1],
collection[left],
)
_lowercase : List[Any] = True
_lowercase : Tuple = low + int((high - low) / 2 )
_lowercase : Tuple = circle_sort_util(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Any = circle_sort_util(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
return swapped or left_swap or right_swap
_lowercase : Union[str, Any] = True
while is_not_sorted is True:
_lowercase : Union[str, Any] = circle_sort_util(lowerCamelCase_ , 0 , len(lowerCamelCase_ ) - 1 )
return collection
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE : int = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
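# Hedged sanity check (not part of the original script):
# circle_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
# circle_sort([]) and one-element lists are returned unchanged by the
# len(collection) < 2 guard at the top.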
| 21 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
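# Hedged note: DummyObject + requires_backends make these placeholders raise a
# clear error about the missing "flax" and "transformers" backends only when a
# class is actually instantiated or loaded, so importing the package itself
# stays cheap when those backends are absent.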
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] ) | 331 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
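# get_duration is imported from the benchmark's local utils module, which is
# not part of this dump; a minimal sketch of such a timing decorator, offered
# as an assumption about its behaviour:
import functools
import time


def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds, recorded per benchmark

    return wrapper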
A: int = 5_0_0_0_0
A: List[Any] = 5_0_0_0
A , A: Tuple = os.path.split(__file__)
A: Dict = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def _snake_case ( UpperCamelCase : datasets.Dataset , UpperCamelCase : List[str] ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase : str = dataset[i]
@get_duration
def _snake_case ( UpperCamelCase : datasets.Dataset , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase : Optional[int] = dataset[i : i + batch_size]
@get_duration
def _snake_case ( UpperCamelCase : datasets.Dataset , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase : Dict = dataset[i]
@get_duration
def _snake_case ( UpperCamelCase : datasets.Dataset , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : int ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase : int = dataset[i : i + batch_size]
def _snake_case ( ):
UpperCAmelCase : Any = {'''num examples''': SPEED_TEST_N_EXAMPLES}
UpperCAmelCase : List[Any] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
UpperCAmelCase : Union[str, Any] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
UpperCAmelCase : Optional[Any] = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
UpperCAmelCase : Optional[int] = generate_example_dataset(
os.path.join(lowerCAmelCase__ , """dataset.arrow""" ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={"""list""": (100,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase : str = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print("""shuffling dataset""" )
UpperCAmelCase : int = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase : List[Any] = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , """wb""" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 351 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
A: str = None
A: List[Any] = logging.get_logger(__name__)
A: Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A: Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
A: Tuple = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
A: Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Tuple = ['input_ids', 'attention_mask']
__lowerCAmelCase : str = MBartTokenizer
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = vocab_file
UpperCAmelCase : Optional[int] = False if not self.vocab_file else True
UpperCAmelCase : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
UpperCAmelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase : int = src_lang if src_lang is not None else """en_XX"""
UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCAmelCase : List[str] = src_lang
UpperCAmelCase : Union[str, Any] = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "en_XX" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "ro_RO" , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : int = src_lang
UpperCAmelCase : Dict = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Any = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = []
UpperCAmelCase : Tuple = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Tuple = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = []
UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
UpperCAmelCase : Any = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
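# Hedged usage sketch (this class is MBartTokenizerFast in transformers):
# tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro",
#                                          src_lang="en_XX", tgt_lang="ro_RO")
# ids = tok("UN Chief Says There Is No Military Solution in Syria").input_ids
# # source sequences end with [</s>, en_XX] -- the suffix_tokens installed by
# # set_src_lang_special_tokens above; there are no prefix tokens.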
| 76 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = '''falcon'''
__lowercase : Optional[int] = ['''past_key_values''']
def __init__( self ,__UpperCAmelCase=6_5024 ,__UpperCAmelCase=4544 ,__UpperCAmelCase=32 ,__UpperCAmelCase=71 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=True ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=None ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=11 ,__UpperCAmelCase=11 ,**__UpperCAmelCase ,) -> Union[str, Any]:
lowerCAmelCase__ : List[str] = vocab_size
# Backward compatibility with n_embed kwarg
lowerCAmelCase__ : List[str] = kwargs.pop("""n_embed""" ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_size if n_embed is None else n_embed
lowerCAmelCase__ : List[Any] = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : str = layer_norm_epsilon
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : str = use_cache
lowerCAmelCase__ : str = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : Tuple = bos_token_id
lowerCAmelCase__ : Union[str, Any] = eos_token_id
lowerCAmelCase__ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
lowerCAmelCase__ : int = alibi
lowerCAmelCase__ : Any = new_decoder_architecture
lowerCAmelCase__ : Optional[int] = multi_query # Ignored when new_decoder_architecture is True
lowerCAmelCase__ : Union[str, Any] = parallel_attn
lowerCAmelCase__ : str = bias
super().__init__(bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self.hidden_size // self.num_attention_heads
@property
def UpperCAmelCase_ ( self ) -> Tuple:
return not self.alibi
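# Hedged usage sketch: the defaults above roughly describe Falcon-7B. In
# transformers the two properties are named head_dim and rotary:
# cfg = FalconConfig(hidden_size=4544, num_attention_heads=71)
# cfg.head_dim  # -> 64 (hidden_size // num_attention_heads)
# cfg.rotary    # -> True whenever alibi is False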
| 37 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
_lowerCAmelCase = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
_lowerCAmelCase = '''▁'''
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Any = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[MASK]" ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase__ : Tuple = (
AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ,normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase )
else mask_token
)
lowerCAmelCase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
lowerCAmelCase__ : str = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : Tuple = keep_accents
lowerCAmelCase__ : Any = vocab_file
lowerCAmelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return len(self.sp_model )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
lowerCAmelCase__ : Optional[Any] = self.__dict__.copy()
lowerCAmelCase__ : Optional[Any] = None
return state
def __setstate__( self ,__UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
if self.remove_space:
lowerCAmelCase__ : int = """ """.join(inputs.strip().split() )
else:
lowerCAmelCase__ : str = inputs
lowerCAmelCase__ : Tuple = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" )
if not self.keep_accents:
lowerCAmelCase__ : Any = unicodedata.normalize("""NFKD""" ,__UpperCAmelCase )
lowerCAmelCase__ : Dict = """""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
lowerCAmelCase__ : Tuple = outputs.lower()
return outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : List[str] = self.preprocess_text(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
lowerCAmelCase__ : str = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCAmelCase__ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase ,"""""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase__ : str = cur_pieces[1:]
else:
lowerCAmelCase__ : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
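# Hedged example of the branch above: a SentencePiece piece that ends in ","
# right after a digit (e.g. "▁2,") is re-split so the comma becomes its own
# token: ["▁2", ","].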
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Tuple = """"""
lowerCAmelCase__ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : List[str] = []
else:
current_sub_tokens.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
lowerCAmelCase__ : int = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
lowerCAmelCase__ : List[str] = [self.sep_token_id]
lowerCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : int = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,"""wb""" ) as fi:
lowerCAmelCase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 37 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase__ : Tuple = Lock()
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
# (the 10 below is hard-coded to match the 10-element demo list in main())
for i in range(0, 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_UpperCAmelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__UpperCAmelCase : Union[str, Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__UpperCAmelCase : str = min(_UpperCAmelCase, _UpperCAmelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_UpperCAmelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__UpperCAmelCase : List[str] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__UpperCAmelCase : List[Any] = max(_UpperCAmelCase, _UpperCAmelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_UpperCAmelCase )
def __UpperCamelCase ( _UpperCAmelCase ):
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Dict = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__UpperCAmelCase : List[Any] = Pipe()
__UpperCAmelCase : Any = Pipe()
process_array_.append(
Process(
target=_UpperCAmelCase, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
__UpperCAmelCase : Any = temp_rs
__UpperCAmelCase : List[Any] = temp_rr
for i in range(1, len(_UpperCAmelCase ) - 1 ):
__UpperCAmelCase : Union[str, Any] = Pipe()
__UpperCAmelCase : str = Pipe()
process_array_.append(
Process(
target=_UpperCAmelCase, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
__UpperCAmelCase : Dict = temp_rs
__UpperCAmelCase : List[str] = temp_rr
process_array_.append(
Process(
target=_UpperCAmelCase, args=(
len(_UpperCAmelCase ) - 1,
arr[len(_UpperCAmelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_UpperCAmelCase ) - 1],
), ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0, len(_UpperCAmelCase ) ):
__UpperCAmelCase : Any = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __UpperCamelCase ( ):
__UpperCAmelCase : Tuple = list(range(10, 0, -1 ) )
print("Initial List" )
print(*_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = odd_even_transposition(_UpperCAmelCase )
print("Sorted List\n" )
print(*_UpperCAmelCase )
if __name__ == "__main__":
main()
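# Hedged note on the pipe protocol above: on even-parity steps each worker
# trades values with its right neighbour and keeps the minimum, on odd-parity
# steps with its left neighbour and keeps the maximum; the shared lock keeps
# two processes from touching a pipe at the same time. After the fixed number
# of alternating passes, the odd-even transposition guarantee says the values
# are sorted, and each worker reports its value back through result_pipe.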
| 37 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
lowerCAmelCase__ : Union[str, Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def __UpperCamelCase ( ):
__UpperCAmelCase : Optional[int] = Github(os.environ["GITHUB_TOKEN"] )
__UpperCAmelCase : Union[str, Any] = g.get_repo("huggingface/transformers" )
__UpperCAmelCase : Union[str, Any] = repo.get_issues(state="open" )
for issue in open_issues:
__UpperCAmelCase : int = sorted([comment for comment in issue.get_comments()], key=lambda _UpperCAmelCase : _UpperCAmelCase.created_at, reverse=True )
__UpperCAmelCase : Any = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
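# Hedged summary of the thresholds above: an issue at least 30 days old is
# closed when the last comment is the bot's own and there has been no update
# for more than 7 days; otherwise the stale warning is posted once the issue
# has been quiet for more than 23 days. Any label in LABELS_TO_EXEMPT opts
# the issue out of both paths.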
| 37 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self : Any ):
A_ = 1
A_ = 3
A_ = (32, 32)
A_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def __A ( self : Union[str, Any] ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def __A ( self : Dict ):
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __A ( self : str ):
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCAmelCase )
@property
def __A ( self : Union[str, Any] ):
def extract(*UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ):
class _a :
"""simple docstring"""
def __init__( self : str ):
A_ = torch.ones([0] )
def __A ( self : List[Any] , UpperCAmelCase : List[str] ):
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
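# Hedged note: the extract() factory above stands in for a feature extractor;
# it returns an object whose .pixel_values is an empty tensor and whose
# device-move method is effectively a no-op, which is all the
# safety-checker-free pipeline under test needs.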
def __A ( self : Dict ):
A_ = "cpu" # ensure determinism for the device-dependent torch.Generator
A_ = self.dummy_cond_unet
A_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
A_ = self.dummy_vae
A_ = self.dummy_text_encoder
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# assemble the pipeline around the DDIM scheduler created above
A_ = StableDiffusionPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "A painting of a squirrel eating a burger"
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
A_ = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
A_ = output.images
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=UpperCAmelCase , )[0]
A_ = image[0, -3:, -3:, -1]
A_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : List[Any] ):
A_ = "cpu" # ensure determinism for the device-dependent torch.Generator
A_ = self.dummy_cond_unet
A_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
A_ = self.dummy_vae
A_ = self.dummy_text_encoder
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
A_ = StableDiffusionPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "A painting of a squirrel eating a burger"
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
A_ = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
A_ = output.images
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=UpperCAmelCase , )[0]
A_ = image[0, -3:, -3:, -1]
A_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : Any ):
A_ = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=UpperCAmelCase )
assert isinstance(UpperCAmelCase , UpperCAmelCase )
assert isinstance(pipe.scheduler , UpperCAmelCase )
assert pipe.safety_checker is None
A_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase )
A_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
A_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __A ( self : Dict ):
A_ = self.dummy_cond_unet
A_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
A_ = self.dummy_vae
A_ = self.dummy_text_encoder
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
A_ = unet.half()
A_ = vae.half()
A_ = bert.half()
# make sure here that pndm scheduler skips prk
A_ = StableDiffusionPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "A painting of a squirrel eating a burger"
A_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=UpperCAmelCase )
A_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
A_ = 4003660346
A_ = 7
# without safety guidance (sld_guidance_scale = 0)
A_ = torch.manual_seed(UpperCAmelCase )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
A_ = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
A_ = torch.manual_seed(UpperCAmelCase )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
A_ = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : Tuple ):
A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=UpperCAmelCase )
A_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "padme amidala taking a bath artwork, safe for work, no nudity"
A_ = 2734971755
A_ = 7
A_ = torch.manual_seed(UpperCAmelCase )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
A_ = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
A_ = torch.manual_seed(UpperCAmelCase )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
A_ = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : List[str] ):
A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
A_ = 1044355234
A_ = 12
A_ = torch.manual_seed(UpperCAmelCase )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
A_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
A_ = torch.manual_seed(UpperCAmelCase )
A_ = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
A_ = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 312 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
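# Hedged note: the dummy input below is 32x32, so a down block is expected to
# halve the spatial size (16x16), a mid block to preserve it (32x32), and an
# up block to double it (64x64) -- matching the shapes returned above.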
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward() | 312 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_model_outputs(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
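# Usage sketch outside the test harness (the checkpoint id is the public one
# exercised above; the sentence is illustrative):
#
#     from transformers import AlbertTokenizerFast, FlaxAlbertModel
#
#     tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     model = FlaxAlbertModel.from_pretrained("albert-base-v2")
#     inputs = tokenizer("Hello, ALBERT!", return_tensors="np")
#     last_hidden_state = model(**inputs)[0]  # shape (1, seq_len, 768)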
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(unk_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(unk_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
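# Usage sketch (the checkpoint id is the one referenced above; the sentence is
# illustrative — BARTpho expects word-segmented Vietnamese input in practice):
#
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên .")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))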
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
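# CLI usage sketch via python-fire, assuming the script is saved as
# save_len_file.py (the tokenizer name and data dir are placeholders; the data
# dir must contain train.* and val.* files in the Seq2SeqDataset layout):
#
#     python save_len_file.py facebook/bart-base ./cnn_dm
#
# equivalent to calling save_len_file("facebook/bart-base", "./cnn_dm") directly.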
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
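# The same benchmark API can be used standalone; a minimal sketch mirroring the
# test arguments above (the tiny checkpoint keeps the run cheap):
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     benchmark_args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(benchmark_args).run()
#     print(results.time_inference_result)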
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize backbone config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
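# CLI usage sketch, assuming this script is saved as convert_detr_to_pytorch.py
# (the run downloads the original checkpoint through torch.hub, so it needs
# network access and a few GB of disk):
#
#     python convert_detr_to_pytorch.py \
#         --model_name detr-resnet-50 \
#         --pytorch_dump_folder_path ./detr-resnet-50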
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
    main(args)
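# Minimal illustration of the whole-word bookkeeping in add_sub_symbol (pure
# Python, no LTP or BERT download required; the tokens are illustrative):
#
#     >>> add_sub_symbol(["中", "国", "人"], {"中国"})
#     ['中', '##国', '人']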
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Return the shortest distance and path between two cells of a binary grid
    (1 = passable, 0 = blocked), moving in 4 or 8 directions."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
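
# A concrete run of the search above (1 = passable cell, 0 = blocked; the grid
# and endpoints are illustrative):
if __name__ == "__main__":
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    distance, path = dijkstra(demo_grid, (0, 0), (2, 0), allow_diagonal=False)
    print(distance)  # 6.0
    print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]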
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
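# Usage sketch: this module backs `accelerate tpu-config`. With --debug the
# gcloud command is only printed, so the call below is side-effect free, though
# it still assumes a default accelerate config file exists (tpu name and zone
# are placeholders):
#
#     parser = tpu_command_parser()
#     args = parser.parse_args(
#         ["--command", "echo hello", "--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--debug"]
#     )
#     tpu_command_launcher(args)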
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
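# Usage sketch outside the test harness (the ONNX checkpoint id is the one used
# above; `low_res_image` stands in for any 128x128 PIL image):
#
#     from diffusers import OnnxStableDiffusionUpscalePipeline
#
#     pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#         "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#     )
#     upscaled = pipe(prompt="a photo of a castle", image=low_res_image).images[0]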
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
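# Hedged illustration (not part of the original script): how a REPLACE_PATTERNS
# entry is meant to work. The pattern below is hypothetical; the real compiled
# patterns live in the (elided) REPLACE_PATTERNS dict defined above this section.
def _demo_version_bump() -> None:
    import re
    re_pattern = re.compile(r'^(\s*)__version__\s*=\s*"[^"]*"', re.MULTILINE)
    replace = r'\g<1>__version__ = "VERSION"'
    code = '__version__ = "4.30.0.dev0"\n'
    new_code = re_pattern.sub(replace.replace("VERSION", "4.30.0"), code)
    assert new_code == '__version__ = "4.30.0"\n'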
| 318 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 |
'''simple docstring'''
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """simple docstring"""

    def __init__( self , graph : dict[str, list[str]] , source_vertex : str ):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent : dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex : str ):
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f'''->{target_vertex}'''
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323 | 1 |
def A ( _UpperCAmelCase : int ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
_UpperCAmelCase = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_UpperCAmelCase = 1
if upper_limit > 0:
_UpperCAmelCase = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(_UpperCAmelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
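# Hedged cross-check (assumes Python >= 3.8 for math.comb): Catalan numbers also
# satisfy the closed form C(n) = C(2n, n) // (n + 1), which should agree with the
# dynamic-programming table built above.
def _check_catalan(upper_limit: int) -> None:
    from math import comb
    dp = catalan_numbers(upper_limit)
    for n in range(upper_limit + 1):
        assert dp[n] == comb(2 * n, n) // (n + 1)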
| 290 |
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path ( graph : dict , start , goal ) -> list[str]:
    '''simple docstring'''
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance ( graph : dict , start , target ) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 290 | 1 |
'''simple docstring'''
def odd_even_transposition ( arr : list ):
    '''simple docstring'''
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2, arr_size - 1, 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(1_0, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''') | 152 | """simple docstring"""
ROMAN = [
(1_0_0_0, "M"),
(9_0_0, "CM"),
(5_0_0, "D"),
(4_0_0, "CD"),
(1_0_0, "C"),
(9_0, "XC"),
(5_0, "L"),
(4_0, "XL"),
(1_0, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int ( roman ) -> int:
    vals = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman ( number ) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor , number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
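# Hedged round-trip check (assumes the classic 1..3999 range of Roman numerals):
# converting an integer to a numeral and back should be the identity.
def _check_roman_roundtrip() -> None:
    for n in range(1, 4000):
        assert roman_to_int(int_to_roman(n)) == n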
| 177 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
lowerCAmelCase : Tuple =logging.get_logger(__name__)
def shape_list ( tensor : Union[tf.Tensor, np.ndarray] ):
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax ( logits : tf.Tensor , axis : Optional[int] = None , name : Optional[str] = None ):
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def functional_layernorm ( inputs , weight , bias , epsilon=1e-5 , axis=-1 ):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def flatten ( input , start_dim=0 , end_dim=-1 ):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
def invert_attention_mask ( encoder_attention_mask : tf.Tensor ):
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds ( tensor : tf.Tensor , embed_dim : int , tensor_name : str = "input_ids" ):
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f'The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '
            f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
        ) , )
def save_attributes_to_hdf5_group ( group , name , data ):
    HDF5_OBJECT_HEADER_LIMIT = 6_45_12
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}' )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group ( group , name ):
    if name in group.attrs:
        data = [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d ( data ):
    def _expand_single_1d_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
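# Hedged usage sketch for shape_list: on an eager tensor the static shape is
# fully known, so the returned list contains plain Python ints.
def _shape_list_demo() -> None:
    x = tf.zeros((2, 3))
    assert shape_list(x) == [2, 3]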
| 353 |
'''simple docstring'''
def decimal_to_fraction ( decimal : int | float | str ):
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("Please enter a valid number" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split("." )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
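# Hedged cross-check against the standard library (valid for inputs whose str()
# form is an exact terminating decimal, as in the examples above).
def _check_decimal_to_fraction(value) -> None:
    from fractions import Fraction
    num, den = decimal_to_fraction(value)
    assert Fraction(num, den) == Fraction(str(value))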
| 147 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 249 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 249 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy ( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1 ( preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman ( preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32'''),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32'''),
}) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''')
| 105 | from __future__ import annotations
def shear_stress ( stress : float , tangential_force : float , area : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
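# Hedged worked example: supplying tangential_force=100 N and area=0.5 m^2 with
# stress=0 asks the function to solve for stress, giving 100 / 0.5 = 200 Pa.
def _shear_stress_demo() -> None:
    name, value = shear_stress(stress=0, tangential_force=100, area=0.5)
    assert (name, value) == ("stress", 200.0)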
| 105 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase__ ( BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )

    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='''size''' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
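# Hedged usage sketch (assumes PIL and recent transformers image utilities):
# running the processor on one random RGB image yields a (1, 3, 224, 224) batch.
def _image_processor_demo() -> None:
    from PIL import Image
    processor = lowercase__()
    image = Image.fromarray(np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8))
    batch = processor(images=image, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 3, 224, 224)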
| 182 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bigbird_pegasus'''] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 53 | 0 |
def _print_dist (dist, v ):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float("inf" ):
                print(int(dist[i][j] ), end="\t" )
            else:
                print("INF", end="\t" )
        print()
def floyd_warshall (graph, v ):
    dist = [[float("inf" ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float("inf" )
                    and dist[k][j] != float("inf" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v )
    return dist, v
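# Hedged mini-example (assumed 3-vertex digraph): 0 -> 1 costs 1 and 1 -> 2
# costs 2, so the relaxation step should set dist[0][2] to 3.
def _floyd_warshall_demo() -> None:
    inf = float("inf")
    sample = [[0.0, 1.0, inf], [inf, 0.0, 2.0], [inf, inf, 0.0]]
    dist, _ = floyd_warshall(sample, 3)
    assert dist[0][2] == 3.0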
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0 | 325 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__( self , image_processor , tokenizer , qformer_tokenizer ) -> None:
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['''qformer_input_ids'''] = qformer_text_encoding.pop("input_ids" )
            encoding['''qformer_attention_mask'''] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="qformer_tokenizer" )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args ) | 325 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys ( s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R'layers_(\d+)' , R'block/\1/layer' , new_key )
        layer_to_block_of_layer = R'(encoder|decoder)\/'
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'/mlp/' , R'/1/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'/mlp/' , R'/2/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(F"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace('''expert/''' , F'''experts/expert_{idx}/''' )] = expert_weihts[idx]
                print(F"""{key} -> {key.replace('expert/' , F'experts/expert_{idx}/' )}""" )
            s_dict.pop(key )
    return s_dict
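# Hedged mini-demo of the renaming above on a single illustrative T5X-style key
# (the key is made up for the example, not read from a real checkpoint).
def _rename_keys_demo() -> None:
    sample = {"encoder/layers_0/attention/key/kernel": 0}
    renamed = rename_keys(dict(sample))
    assert "encoder/block/0/layer/0/SelfAttention/k/kernel" in renamed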
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config ( gin_file , num_experts ):
    """simple docstring"""
    import regex as re
    with open(gin_file , 'r' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'(.*) = ([0-9.]*)' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '.' in value else int(value )
    activation = re.findall(R'(.*activations) = \(\'(.*)\',\)' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch ( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    """simple docstring"""
    print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params , sep='/' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='/' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 32 | import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = 'roberta'
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = 'transformer'
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = F"""{prefix}.embeddings.{w}.weight"""
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = F"""{prefix}.embeddings.LayerNorm.{w}"""
a_ = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[F"""{prefix}.h.{std_idx}.{layer}.{w}"""] = state_dict[
                        F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
                    ]
            compressed_sd[F"""{prefix}.h.{std_idx}.attn.bias"""] = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[F"""{prefix}.encoder.layer.{std_idx}.{layer}.{w}"""] = state_dict[
                        F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[F"""{layer}"""] = state_dict[F"""{layer}"""]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[F"""lm_head.dense.{w}"""] = state_dict[F"""lm_head.dense.{w}"""]
                compressed_sd[F"""lm_head.layer_norm.{w}"""] = state_dict[F"""lm_head.layer_norm.{w}"""]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[F"""{prefix}.ln_f.{w}"""] = state_dict[F"""{prefix}.ln_f.{w}"""]
        compressed_sd['lm_head.weight'] = state_dict['lm_head.weight']
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 175 | 0 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = T5Config.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 19 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar('''T''')
def get_parent_position ( position ):
    """simple docstring"""
    return (position - 1) // 2
def get_child_left_position ( position ):
    """simple docstring"""
    return (2 * position) + 1
def get_child_right_position ( position ):
    """simple docstring"""
    return (2 * position) + 2
class MinPriorityQueue ( Generic[T] ):
    """simple docstring"""

    def __init__( self ) ->None:
        self.heap : list[tuple[T, int]] = []
        self.position_map : dict[T, int] = {}
        self.elements : int = 0

    def __len__( self ) ->int:
        return self.elements

    def __repr__( self ) ->str:
        return str(self.heap )

    def is_empty( self ) ->bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push( self , elem , weight ) ->None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )

    def extract_min( self ) ->T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem , _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem , _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem

    def update_key( self , elem , weight ) ->None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _ , parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )

    def _bubble_up( self , elem ) ->None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _ , weight = self.heap[curr_pos]
        _ , parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos , parent_position )
            return self._bubble_up(elem )
        return None

    def _bubble_down( self , elem ) ->None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _ , weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None

    def _swap_nodes( self , node1_pos , node2_pos ) ->None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos] , self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted ( Generic[T] ):
    """simple docstring"""

    def __init__( self ) ->None:
        self.connections : dict[T, dict[T, int]] = {}
        self.nodes : int = 0

    def __repr__( self ) ->str:
        return str(self.connections )

    def __len__( self ) ->int:
        return self.nodes

    def add_node( self , node ) ->None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge( self , node1 , node2 , weight ) ->None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo ( graph : GraphUndirectedWeighted[T] , ):
    """simple docstring"""
    dist : dict[T, int] = {node: maxsize for node in graph.connections}
    parent : dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue : MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
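# Hedged usage sketch: on a triangle with one heavy edge, Prim's algorithm
# should attach "c" through the light edge from "b".
def _prims_demo() -> None:
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 1)
    g.add_edge("b", "c", 2)
    g.add_edge("a", "c", 10)
    dist, parent = prims_algo(g)
    assert parent["c"] == "b" and dist["c"] == 3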
| 19 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output of the Shap-E pipeline: a batch of rendered images (or latents)."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        # Draw fresh noise unless the caller supplied latents of the right shape
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        """Device on which the pipeline's modules actually run (accounting for offload hooks)."""
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",  # pil, np, latent
        return_dict: bool = True,
    ):
        """Generate 3D renders conditioned on an input image. See the example docstring above."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
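The guidance update inside the denoising loop above is standard classifier-free guidance. A self-contained numeric sketch of just that one step (the tensor values are made up for illustration):

import torch

guidance_scale = 3.0
noise_pred_uncond = torch.tensor([0.1, 0.2])  # prediction from zeroed image embeddings
noise_pred_cond = torch.tensor([0.4, 0.1])    # prediction conditioned on the image

# guided = uncond + s * (cond - uncond); s > 1 extrapolates past the conditional prediction
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(guided)  # tensor([ 1.0000, -0.1000])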
| 75 |
"""simple docstring"""
from __future__ import annotations
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Any = data
UpperCAmelCase : Node | None = None
UpperCAmelCase : Node | None = None
def _snake_case ( UpperCamelCase : Node | None ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def _snake_case ( UpperCamelCase : Node | None ):
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def _snake_case ( UpperCamelCase : Node ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def _snake_case ( ): # Main function for testing.
UpperCAmelCase : int = Node(1 )
UpperCAmelCase : Tuple = Node(2 )
UpperCAmelCase : Any = Node(3 )
UpperCAmelCase : Optional[int] = Node(4 )
UpperCAmelCase : Any = Node(5 )
UpperCAmelCase : Optional[int] = Node(6 )
UpperCAmelCase : int = Node(7 )
UpperCAmelCase : str = Node(8 )
UpperCAmelCase : str = Node(9 )
print(is_full_binary_tree(UpperCamelCase ) )
print(depth_of_tree(UpperCamelCase ) )
print("""Tree is: """ )
display(UpperCamelCase )
if __name__ == "__main__":
main()
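A hedged doctest-style check of the helpers above, on a three-node tree:

root = Node(1)
root.left = Node(2)
root.right = Node(3)
print(is_full_binary_tree(root))  # True: every node has zero or two children
print(depth_of_tree(root))        # 2
root.left.left = Node(4)          # node 2 now has exactly one child
print(is_full_binary_tree(root))  # False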
| 109 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Yields 0, 1, 2, ... and stops randomly (with probability p_stop after each item)
    # or after max_length items, whichever comes first.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class BatchSamplerShardCheck(unittest.TestCase):
    # Class and test-method names reconstructed; the originals were obfuscated placeholders.
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Without split_batches, each shard advertises as many batches as we expect of it
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
_A: Optional[int] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
_A: int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a )
_A: Tuple = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A: int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_A: int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(_a , _a )
_A: List[str] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_A: List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A: Dict = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_A: str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(_a , _a )
_A: Optional[int] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_A: int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A: List[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_A: str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(_a , _a )
_A: Optional[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_A: str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is very small.
_A: str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_A: List[str] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_a , _a )
_A: Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_A: Tuple = [[], []]
self.check_batch_sampler_shards(_a , _a )
    def test_batch_sampler_shards_with_splits(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
_A: List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
_A: Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_A: str = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size.
_A: List[Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_A: Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_A: str = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_A: int = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A: Any = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_A: Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_A: Tuple = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_A: Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is very small.
_A: Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_A: Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_A: List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_A: List[Any] = [[], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
_A: str = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
_A: Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_A: Optional[Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A: Optional[Any] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_A: Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_A: Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_A: str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A: List[str] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_A: Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_A: Optional[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_A: Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A: Optional[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_A: Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_A: List[str] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_A: Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is very small.
_A: Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_A: Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_A: Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_A: Optional[Any] = [[], []]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
    def test_batch_sampler_shards_with_splits_no_even(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
_A: Optional[int] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
_A: Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_A: Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size.
_A: Dict = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_A: Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_A: Tuple = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_A: Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A: Optional[Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_A: List[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_A: int = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_A: List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is very small.
_A: int = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_A: List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_A: Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_A: int = [[], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
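For intuition about what the shard assertions above encode, a minimal standalone sketch of `BatchSamplerShard` with two processes (the keyword names `num_processes` and `process_index` are inferred from the positional calls in the tests):

from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
shards = [BatchSamplerShard(batch_sampler, num_processes=2, process_index=i) for i in range(2)]
# Batches are dealt out round-robin, one per process:
print(list(shards[0]))  # [[0, 1], [4, 5]]
print(list(shards[1]))  # [[2, 3], [6, 7]]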
| 370 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
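The file above is the standard lazy-import shim: exported names are registered in `_import_structure`, and the module object is swapped for a `_LazyModule` so heavy submodules load only on first attribute access. A toy sketch of the same mechanism (this `LazyModule` is illustrative, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)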
| 301 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
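A hedged sketch of how a concrete reader would plug into the base class above (`InMemoryJsonReader` is invented for illustration; the real readers in `datasets` delegate to a `DatasetBuilder` instead of parsing files themselves):

class InMemoryJsonReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        import json

        with open(self.path_or_paths, encoding="utf-8") as f:
            records = json.load(f)  # expects a JSON array of flat objects
        return Dataset.from_list(records)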
| 29 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the random values drawn along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model: y = x * a + b, with scalar learnable parameters a and b."""

    def __init__(self) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    # Class and test-method names reconstructed; the originals were obfuscated placeholders.
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
    def test_can_resume_training(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
    def test_invalid_registration(self):
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
    def test_with_scheduler(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
    def test_checkpoint_deletion(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
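The end-user flow these tests guard is small; a minimal sketch of checkpointing with an explicit directory (the path and hyperparameters here are illustrative):

from accelerate import Accelerator
import torch

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpts/step_0")  # writes model, optimizer and RNG state
accelerator.load_state("ckpts/step_0")  # restores everything that was prepared/registered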
| 29 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__A = logging.get_logger(__name__)
def normalize_box(box, width, height):
    # Rescale a pixel-space (left, top, right, bottom) box to the 0-1000 grid.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    # Class name reconstructed from the Detectron2/tesseract-specific behavior below.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
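To make the 0-1000 box normalization concrete: on a 200x100 pixel page, a word box of (left, top, right, bottom) = (20, 10, 60, 30) maps as follows:

box = [20, 10, 60, 30]
width, height = 200, 100
print(normalize_box(box, width, height))  # [100, 100, 300, 300]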
| 75 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    # Class, helper, and test-method names reconstructed; the originals were obfuscated placeholders.
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, 'dpr_tokenizer')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        bart_tokenizer_path = os.path.join(self.tmpdirname, 'bart_tokenizer')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'bart_tokenizer'))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name='custom',
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, 'dataset')
            config.index_path = os.path.join(self.tmpdirname, 'index.faiss')
            dataset.get_index('embeddings').save(os.path.join(self.tmpdirname, 'index.faiss'))
            dataset.drop_index('embeddings')
            dataset.save_to_disk(os.path.join(self.tmpdirname, 'dataset'))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, 'hf_bert_base.hnswSQ8_correct_phi_128.c_index')
        dataset.save_faiss_index('embeddings', index_file_name + '.index.dpr')
        pickle.dump(dataset['id'], open(index_file_name + '.index_meta.dpr', 'wb'))

        passages_file_name = os.path.join(self.tmpdirname, 'psgs_w100.tsv.pkl')
        passages = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, 'wb'))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name='legacy',
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_canonical_hf_index_retriever()
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_A = self.get_dummy_dataset()
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_legacy_index_retriever()
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __A )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __A ( self: Tuple ) -> Union[str, Any]:
import torch
_A = 1
_A = self.get_dummy_canonical_hf_index_retriever()
_A = [[5, 7], [10, 11]]
_A = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
_A ,_A ,_A = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
_A = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='''pt''' , )
_A ,_A ,_A ,_A = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __A ( self: int ) -> Dict:
_A = self.get_dpr_ctx_encoder_tokenizer()
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
_A = [[5, 7], [10, 11]]
_A = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __A ) # check for doc token related keys in dictionary.
| 75 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( GLPNImageProcessor ):
    '''simple docstring'''
    def __init__( self: int ,*args: List[str] ,**kwargs: Tuple ) -> None:
        warnings.warn(
            """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use GLPNImageProcessor instead.""" ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
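# Hedged usage sketch (not part of the original file): the shim above only adds a
# FutureWarning on instantiation, so legacy code keeps working. The helper name is made up.
def _demo_deprecation_warning():
    import warnings as demo_warnings

    with demo_warnings.catch_warnings(record=True) as caught:
        demo_warnings.simplefilter("always")
        _snake_case()  # deprecated alias defined above
        return any(issubclass(w.category, FutureWarning) for w in caught)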
| 345 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
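# Quick check (added for illustration, not in the original file): the byte-to-unicode
# table covers all 256 byte values and is invertible, which is what lets the byte-level
# BPE below handle arbitrary UTF-8 input without any <unk> fallback.
_demo_table = bytes_to_unicode()
assert len(_demo_table) == 256 and len(set(_demo_table.values())) == 256
_demo_inverse = {v: k for k, v in _demo_table.items()}
assert all(_demo_inverse[_demo_table[b]] == b for b in range(256))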
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
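# Illustration (not in the original file): get_pairs returns the set of adjacent symbol
# bigrams, which the BPE loop below repeatedly merges by rank.
assert get_pairs("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}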
class _snake_case ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self: Union[str, Any] ,vocab_file: str ,merges_file: str ,errors: str="replace" ,bos_token: str="<s>" ,eos_token: str="</s>" ,sep_token: str="</s>" ,cls_token: str="<s>" ,unk_token: str="<unk>" ,pad_token: str="<pad>" ,mask_token: str="<mask>" ,add_prefix_space: bool=False ,**kwargs: Tuple ,) -> Any:
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            errors=errors ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        with open(vocab_file ,encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file ,encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges ,range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self: List[str] ) -> int:
        return len(self.encoder )
    def get_vocab( self: Any ) -> Union[str, Any]:
        return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self: Tuple ,token: str ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self: Union[str, Any] ,text: str ) -> List[str]:
        bpe_tokens = []
        for token in re.findall(self.pat ,text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self: List[Any] ,token: str ) -> int:
        return self.encoder.get(token ,self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self: List[str] ,index: int ) -> str:
        return self.decoder.get(index )
    def convert_tokens_to_string( self: List[str] ,tokens: List[str] ) -> str:
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
        return text
    def save_vocabulary( self: Optional[Any] ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file ,"""w""" ,encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self: str ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self: Union[str, Any] ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self: str ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self: Optional[Any] ,text: str ,is_split_into_words: bool=False ,**kwargs: List[str] ) -> Optional[int]:
        add_prefix_space = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
    def _pad( self: List[str] ,encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] ,max_length: Optional[int] = None ,padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,pad_to_multiple_of: Optional[int] = None ,return_attention_mask: Optional[bool] = None ,) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs ,max_length=max_length ,padding_strategy=padding_strategy ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,)
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["""global_attention_mask"""] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
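# Minimal sketch of the `global_attention_mask` padding rule implemented above (added
# for illustration; plain Python, no tokenizer instance needed): right padding appends
# `-1` ("local attention") until the mask matches the padded input length.
def _demo_pad_global_attention_mask(global_attention_mask, target_length):  # demo helper
    difference = target_length - len(global_attention_mask)
    return global_attention_mask + [-1] * difference

assert _demo_pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]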
| 345 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt( self : int ):
__lowercase = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''pt''' )
__lowercase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
__lowercase = text_classifier('''This is great !''' ,top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}] )
__lowercase = text_classifier(['''This is great !''', '''This is bad'''] ,top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) ,[
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] ,)
__lowercase = text_classifier('''This is great !''' ,top_k=1 )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
# Legacy behavior
__lowercase = text_classifier('''This is great !''' ,return_all_scores=lowercase__ )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
__lowercase = text_classifier('''This is great !''' ,return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) ,[[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}]] )
__lowercase = text_classifier(['''This is great !''', '''Something else'''] ,return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) ,[
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] ,)
__lowercase = text_classifier(['''This is great !''', '''Something else'''] ,return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) ,[
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
] ,)
@require_torch
    def test_accepts_torch_device( self : Tuple ):
import torch
__lowercase = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''pt''' ,device=torch.device('''cpu''' ) ,)
__lowercase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@require_tf
    def test_small_model_tf( self : List[Any] ):
__lowercase = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''tf''' )
__lowercase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@slow
@require_torch
    def test_pt_bert( self : List[str] ):
__lowercase = pipeline('''text-classification''' )
__lowercase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''POSITIVE''', '''score''': 1.0}] )
__lowercase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
__lowercase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
@slow
@require_tf
    def test_tf_bert( self : Union[str, Any] ):
__lowercase = pipeline('''text-classification''' ,framework='''tf''' )
__lowercase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''POSITIVE''', '''score''': 1.0}] )
__lowercase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
__lowercase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowercase__ ) ,[{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
    def get_test_pipeline( self : Optional[Any] ,model : List[str] ,tokenizer : int ,processor : List[Any] ):
        text_classifier = TextClassificationPipeline(model=model ,tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test( self : List[Any] ,text_classifier : Optional[int] ,_ : str ):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = '''HuggingFace is in'''
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) ,[{'''label''': ANY(str ), '''score''': ANY(float )}] )
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values() )
        valid_inputs = ['''HuggingFace is in ''', '''Paris is in France''']
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) ,[{'''label''': ANY(str ), '''score''': ANY(float )}, {'''label''': ANY(str ), '''score''': ANY(float )}] ,)
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values() )
        self.assertTrue(outputs[1]['''label'''] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs ,top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) ,[[{'''label''': ANY(str ), '''score''': ANY(float )}] * N, [{'''label''': ANY(str ), '''score''': ANY(float )}] * N] ,)
        valid_inputs = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) ,{'''label''': ANY(str ), '''score''': ANY(float )} ,)
        self.assertTrue(outputs['''label'''] in model.config.id2label.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_inputs = [['''HuggingFace is in ''', '''Paris is in France''']]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_inputs )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
        self.assertEqual(
            nested_simplify(outputs ) ,[{'''label''': ANY(str ), '''score''': ANY(float )}] ,)
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values() )
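# Hedged usage sketch (not part of the original tests): `top_k` controls how many labels
# come back per input, and `top_k=None` returns every label — the non-legacy replacement
# for `return_all_scores=True` exercised above. Kept commented so importing this module
# does not trigger a model download; the model name is only an example.
# from transformers import pipeline
# classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
# classifier("This is great !", top_k=None)  # -> one dict per label, e.g. LABEL_0 / LABEL_1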
| 52 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowercase_ (PretrainedConfig ):
"""simple docstring"""
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self : Any ,vocab_size : Tuple=5_0_2_5_7 ,max_position_embeddings : Union[str, Any]=2_0_4_8 ,hidden_size : List[Any]=2_0_4_8 ,num_layers : Optional[Any]=2_4 ,attention_types : Union[str, Any]=[[["global", "local"], 1_2]] ,num_heads : List[Any]=1_6 ,intermediate_size : Optional[Any]=None ,window_size : Optional[int]=2_5_6 ,activation_function : Union[str, Any]="gelu_new" ,resid_dropout : Tuple=0.0 ,embed_dropout : List[str]=0.0 ,attention_dropout : Dict=0.0 ,classifier_dropout : Union[str, Any]=0.1 ,layer_norm_epsilon : List[str]=1e-5 ,initializer_range : Dict=0.0_2 ,use_cache : str=True ,bos_token_id : int=5_0_2_5_6 ,eos_token_id : Any=5_0_2_5_6 ,**kwargs : Optional[Any] ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.attention_layers)` == `config.num_layers` '''
                F"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
                F"`config.num_layers = {self.num_layers}`. "
                '''`config.attention_layers` is prepared using `config.attention_types`. '''
                '''Please verify the value of `config.attention_types` argument.''' )
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types : Tuple ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold( input , dimension , size , step ):
    """simple docstring"""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='''floor''' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
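# Sanity check (added for illustration, not in the original file): the ONNX-friendly
# unfold above should match torch.Tensor.unfold on a small example. Call manually when
# torch is installed; the helper name is made up for the demo.
def _demo_custom_unfold():
    import torch

    x = torch.arange(10).view(1, 10).float()
    expected = x.unfold(dimension=1, size=4, step=2)   # windows [0..3],[2..5],[4..7],[6..9]
    actual = custom_unfold(x, dimension=1, size=4, step=2)
    assert torch.equal(actual, expected)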
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    """simple docstring"""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='''floor''' )
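# Illustration (added, not in the original file): the helper picks the largest block
# length that divides the sequence length and is below the window size. With
# seq_length=40 and window_size=16 the divisors of 40 under 16 are {1,2,4,5,8,10},
# so the block length is 10 and there are 4 blocks. Call manually when torch is installed.
def _demo_block_length():
    block_length, num_blocks = custom_get_block_length_and_num_blocks(40, 16)
    assert int(block_length) == 10 and int(num_blocks) == 4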
class lowercase_ (OnnxConfigWithPast ):
"""simple docstring"""
@property
    def inputs( self : List[Any] ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
    def num_attention_heads( self : List[str] ):
        return self._config.num_heads
    def generate_dummy_inputs( self : Optional[int] ,tokenizer : PreTrainedTokenizer ,batch_size : int = -1 ,seq_length : int = -1 ,is_pair : bool = False ,framework : Optional[TensorType] = None ,):
        common_inputs = super(lowercase_ ,self ).generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self : Any ):
        return 1_3
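# Worked example (added for illustration): with hidden_size=2048 and num_heads=16 the
# dummy past_key_values built in generate_dummy_inputs above have per-layer shape
# (batch, num_heads, seq_len + 2, hidden_size // num_heads) = (2, 16, seq_len + 2, 128).
def _demo_past_shape(batch=2, seqlen=8, hidden_size=2_048, num_heads=1_6):  # demo helper
    past_key_values_length = seqlen + 2
    return (batch, num_heads, past_key_values_length, hidden_size // num_heads)

assert _demo_past_shape() == (2, 16, 10, 128)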
| 52 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_2,
            'tokenizer_2': tokenizer_2,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
    def test_stable_diffusion_xl_img2img_euler( self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_save_load_optional_components( self ):
'''simple docstring'''
pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        '''simple docstring'''
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 6_4, 6_4) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def snake_case ( self ):
        '''simple docstring'''
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
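# Determinism note illustrated (added sketch, not from the original file): seeding the
# numpy RandomState, as get_inputs does above, makes the latents reproducible across runs.
def _demo_seeded_latents(seed=0):  # demo helper
    import numpy as np

    a = np.random.RandomState(seed).standard_normal((1, 4, 6_4, 6_4))
    b = np.random.RandomState(seed).standard_normal((1, 4, 6_4, 6_4))
    return np.array_equal(a, b)  # True: same seed, same latents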
| 293 |
"""simple docstring"""
def __A (num1 , num2 ) ->bool:
    """simple docstring"""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
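# Why the xor trick works (illustrative note, not in the original file): in two's
# complement the sign lives in the top bit, and `num1 ^ num2` has that bit set exactly
# when the signs differ, making the xor result negative.
assert (__A(1, -1), __A(-1, 1), __A(2, 3), __A(-2, -3)) == (True, True, False, False)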
| 293 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
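# Illustrative note (not part of the original file): `_LazyModule` defers the heavy
# imports above until an attribute is first accessed, so `import transformers` stays
# cheap. A minimal stand-in using importlib shows the same idea; the helper name is
# made up for the demo, and the module must be importable.
def _demo_lazy_attr(module_name, attr):
    import importlib

    return getattr(importlib.import_module(module_name), attr)

# e.g. _demo_lazy_attr("math", "pi") imports `math` only at call time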
| 305 |
from ....utils import logging
A : List[str] = logging.get_logger(__name__)
class lowerCamelCase :
"""simple docstring"""
    def __init__( self : List[str] , config : Optional[Any] , num_labels : Any=None , modal_hidden_size : List[str]=2_048 ) -> List[Any]:
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 305 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class lowercase__ ( ProcessorMixin ):
lowercase__ = """AutoTokenizer"""
lowercase__ = ["""tokenizer"""]
lowercase__ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
    def __init__( self : List[str] ,tokenizer : Tuple ,speaker_embeddings : Tuple=None ):
        '''simple docstring'''
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained( cls : Union[str, Any] ,pretrained_processor_name_or_path : int ,speaker_embeddings_dict_path : str="speaker_embeddings_path.json" ,**kwargs : Optional[Any] ):
        '''simple docstring'''
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path ,speaker_embeddings_dict_path ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,False ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if speaker_embeddings_path is None:
                logger.warning(
                    F'`{os.path.join(pretrained_processor_name_or_path ,speaker_embeddings_dict_path )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path ,**kwargs )
        return cls(tokenizer=tokenizer ,speaker_embeddings=speaker_embeddings )
    def save_pretrained( self : Tuple ,save_directory : Union[str, Any] ,speaker_embeddings_dict_path : int="speaker_embeddings_path.json" ,speaker_embeddings_directory : Dict="speaker_embeddings" ,push_to_hub : bool = False ,**kwargs : Tuple ,):
        '''simple docstring'''
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory ,speaker_embeddings_directory ,'v2' ) ,exist_ok=True )
            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] ,speaker_embeddings_directory ,F'{prompt_key}_{key}' ) ,voice_preset[key] ,allow_pickle=False ,)
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory ,F'{prompt_key}_{key}.npy' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory ,speaker_embeddings_dict_path ) ,'w' ) as fp:
                json.dump(embeddings_dict ,fp )
        super().save_pretrained(save_directory ,push_to_hub ,**kwargs )
    def _load_voice_preset( self : Union[str, Any] ,voice_preset : str = None ,**kwargs : Dict ):
        '''simple docstring'''
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,False ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self : Any ,voice_preset : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
    def __call__( self : Any ,text : Optional[Any]=None ,voice_preset : Union[str, Any]=None ,return_tensors : Any="pt" ,max_length : Dict=256 ,add_special_tokens : int=False ,return_attention_mask : int=True ,return_token_type_ids : List[str]=False ,**kwargs : Union[str, Any] ,):
        '''simple docstring'''
        if voice_preset is not None and not isinstance(voice_preset ,dict ):
            if (
                isinstance(voice_preset ,str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset ,str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset ,**kwargs )
            voice_preset = BatchFeature(data=voice_preset ,tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text ,return_tensors=return_tensors ,padding='max_length' ,max_length=max_length ,return_attention_mask=return_attention_mask ,return_token_type_ids=return_token_type_ids ,add_special_tokens=add_special_tokens ,**kwargs ,)
        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text
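# Shape contract illustrated (added sketch, not in the original file): a voice preset
# carries a 1-D semantic prompt and 2-D coarse/fine prompts, matching `preset_shape`
# above. Sizes below are made up for the demo.
def _demo_voice_preset():
    import numpy as np

    return {
        "semantic_prompt": np.zeros(5, dtype=np.int64),     # ndim == 1
        "coarse_prompt": np.zeros((2, 5), dtype=np.int64),  # ndim == 2
        "fine_prompt": np.zeros((2, 5), dtype=np.int64),    # ndim == 2
    }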
| 83 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits( self , batch_size , length )->List[Any]:
        '''simple docstring'''
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self )->Tuple:
'''simple docstring'''
A_ : Union[str, Any] = None
A_ : Any = 20
A_ : Any = self._get_uniform_logits(batch_size=2 , length=_SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
A_ : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
A_ : Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
A_ : List[str] = jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
A_ : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
A_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
A_ : List[Any] = jax.nn.softmax(temp_dist_warper_smoother(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper( self )->List[Any]:
'''simple docstring'''
A_ : Any = None
A_ : List[Any] = 10
A_ : str = 2
# create ramp distribution
A_ : Any = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
A_ : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
A_ : Any = FlaxTopKLogitsWarper(3 )
A_ : Tuple = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
A_ : Optional[int] = 5
A_ : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
A_ : Optional[Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
A_ : Dict = top_k_warp_safety_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self )->Any:
'''simple docstring'''
A_ : str = None
A_ : Optional[Any] = 10
A_ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
A_ : Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
A_ : str = FlaxTopPLogitsWarper(0.8 )
A_ : Optional[int] = np.exp(top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
A_ : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# check edge cases with negative and extreme logits
A_ : Union[str, Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
A_ : str = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
A_ : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
A_ : str = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self )->Any:
'''simple docstring'''
A_ : str = 20
A_ : Union[str, Any] = 4
A_ : Optional[Any] = 0
A_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
A_ : int = ids_tensor((batch_size, 20) , vocab_size=20 )
A_ : List[Any] = 5
A_ : Optional[int] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Optional[int] = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
A_ : Tuple = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Any = 15
A_ : int = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
    def test_forced_bos_token_logits_processor( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[int] = 20
A_ : Optional[int] = 4
A_ : Optional[int] = 0
A_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
A_ : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
A_ : str = 1
A_ : List[str] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[str] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
A_ : Optional[int] = 3
A_ : List[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
    def test_forced_eos_token_logits_processor( self )->List[str]:
'''simple docstring'''
A_ : Union[str, Any] = 20
A_ : str = 4
A_ : Dict = 0
A_ : Optional[int] = 5
A_ : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
A_ : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
A_ : Any = 4
A_ : Optional[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
A_ : int = 3
A_ : Union[str, Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Dict = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
    def test_processor_list( self )->str:
        '''simple docstring'''
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids_comp , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
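# --- Usage sketch (illustrative, not part of the test suite above) ---
# Minimal example of chaining warpers through a FlaxLogitsProcessorList at
# generation time. Assumes the Flax extras of `transformers` are installed;
# the toy batch and vocab sizes are arbitrary example values.
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper


def _warp_sketch():
    input_ids = jnp.ones((2, 4), dtype="i4")  # dummy prompt of length 4
    scores = jnp.zeros((2, 20))               # uniform logits over a toy 20-token vocab
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(3)]
    )
    return processors(input_ids, scores, cur_len=4)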
| 186 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
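# Usage sketch (illustrative): how the guards above are typically invoked; the
# keys are examples that appear in the runtime-check list, not an exhaustive set.
def _version_check_sketch():
    require_version_core(deps["numpy"])  # hard requirement, checked at import time
    dep_version_check("tokenizers", "pip install tokenizers")  # soft check with a hint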
| 362 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
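# Follow-up sketch (illustrative, not part of this script): loading the extracted
# weights into a DistilBERT student. "distilbert-base-uncased" is used only as a
# config donor here, and strict=False hedges against head-name mismatches.
def _load_student_sketch(checkpoint_path):
    from transformers import DistilBertConfig, DistilBertForMaskedLM

    config = DistilBertConfig.from_pretrained("distilbert-base-uncased")
    student = DistilBertForMaskedLM(config)
    state = torch.load(checkpoint_path, map_location="cpu")
    student.load_state_dict(state, strict=False)
    return student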
| 117 | 0 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = 'us-east-1'  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_00,
"save_steps": 55_00,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 10_00}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
        return f'''{self.framework}-transformers-test'''
@property
    def test_path(self):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
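# Usage sketch (illustrative, not part of the original conftest): a test class
# consuming the fixture above. The class name and assertion are hypothetical.
@pytest.mark.usefixtures("sm_env")
class TestEnvironmentSmoke:
    framework = "pytorch"

    def test_base_job_name(self):
        assert self.env.base_job_name == "pytorch-transformers-test"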
| 98 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
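# Usage sketch (illustrative): with the _LazyModule indirection above, the heavy
# torch-backed classes are only imported on first attribute access, e.g.:
#
#     from transformers import BioGptTokenizer, BioGptForCausalLM
#     tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#     model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")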
| 284 | 0 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
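# Hardened variant (illustrative sketch, same third-party endpoint assumption as
# above): adds timeouts and status checks so network failures raise early.
def download_video_safe(url: str, timeout: float = 30.0) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    meta = requests.get(base_url + url, timeout=timeout)
    meta.raise_for_status()
    video_url = meta.json()[0]["urls"][0]["src"]
    video = requests.get(video_url, timeout=timeout)
    video.raise_for_status()
    return video.content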
| 362 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                    cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
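# Usage sketch (illustrative, not part of the module): a basic round trip with the
# tokenizer defined above; calling it downloads the canonical checkpoint.
def _usage_sketch():
    tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    ids = tok("Hello world")["input_ids"]
    return tok.decode(ids, skip_special_tokens=True)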
| 262 | 0 |
'''simple docstring'''
def solution():
    constant = []
    i = 1
    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
| 55 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
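# Quick checks (illustrative): a few hand-verifiable factorizations.
if __name__ == "__main__":
    assert prime_factors(12) == [2, 2, 3]
    assert prime_factors(97) == [97]  # a prime factors as itself
    assert prime_factors(2 * 3 * 5 * 7) == [2, 3, 5, 7]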
| 55 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
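# Usage sketch (illustrative, outside the test suite): the writer/reader round
# trip the tests above exercise; the toy columns mirror the fixtures' schema.
def _roundtrip_sketch(tmp_dir):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
    path = f"{tmp_dir}/toy.parquet"
    ParquetDatasetWriter(ds, path).write()
    return ParquetDatasetReader(path).read()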
| 170 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, )
    max_seq_length: int = field(
        default=1_0_2_4, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
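# Worked example (illustrative, not executed by the script): the "#"-delimited
# table format the preprocessing above expects, and the frame it produces.
def _table_text_sketch():
    table_text = "col_a#col_b\n1#2\n3#4"
    rows = [r.split("#") for r in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])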
| 170 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''yjernite/retribert-base-uncased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
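# Usage sketch (illustrative, not part of the module): sentence-pair encoding with
# the fast tokenizer above, using the checkpoint referenced in the maps at the top.
def _pair_encoding_sketch():
    tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    enc = tok("what is a retriever?", "a dense passage retriever")
    return enc["input_ids"], enc["token_type_ids"]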
| 19 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
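# Usage sketch (illustrative): wiring the trainer above into a script; every
# argument below is a hypothetical placeholder for an object built elsewhere.
def _build_trainer_sketch(model, training_args, train_ds, eval_ds, eval_examples,
                          tokenizer, data_collator, post_process, metric_fn):
    return QuestionAnsweringSeq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_ds,
        eval_dataset=eval_ds,
        eval_examples=eval_examples,
        tokenizer=tokenizer,
        data_collator=data_collator,
        post_process_function=post_process,
        compute_metrics=metric_fn,
    )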
| 19 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("""ct x y z""")
def beta(velocity: float):
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("""Speed must be greater than or equal to 1!""" )
return velocity / c
def gamma(velocity: float):
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float):
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def transform(velocity: float, event: np.ndarray | None = None):
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
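# Worked numeric check (illustrative): time dilation at half the speed of light.
if __name__ == "__main__":
    half_c = 0.5 * c
    print(f"beta(0.5c)  = {beta(half_c)}")   # 0.5
    print(f"gamma(0.5c) = {gamma(half_c)}")  # ~1.1547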
| 358 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 229 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
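# Usage sketch (illustrative): what `list_field` provides on a dataclass — a fresh
# mutable default per instance; the field name here is a hypothetical example.
@dataclass
class _ListFieldSketch:
    tags: List[str] = list_field(default=["a", "b"])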
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =42
a_ =42
a_ =42
a_ =42
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =42
a_ =field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =False
a_ =True
a_ =None
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
a_ ="""titi"""
a_ ="""toto"""
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
a_ ="""titi"""
a_ ="""toto"""
a_ =42
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ ="toto"
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase : Dict = BasicEnum(self.foo )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ ="toto"
def _lowercase ( self : str ) -> Tuple:
__lowerCamelCase : Dict = MixedTypeEnum(self.foo )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =None
a_ =field(default=UpperCamelCase_ , metadata={"""help""": """help message"""} )
a_ =None
a_ =list_field(default=[] )
a_ =list_field(default=[] )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =list_field(default=[] )
a_ =list_field(default=[1, 2, 3] )
a_ =list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
a_ =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =field()
a_ =field()
a_ =field()
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = BasicEnum(self.required_enum )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =42
a_ =field()
a_ =None
a_ =field(default="""toto""" , metadata={"""help""": """help message"""} )
a_ =list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =False
a_ =True
a_ =None
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =None
a_ =field(default=UpperCamelCase_ , metadata={"""help""": """help message"""} )
a_ =None
a_ =list_field(default=[] )
a_ =list_field(default=[] )
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] , _a : Union[str, Any] , _a : int ) -> int:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__lowerCamelCase : str = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != 'container'}
__lowerCamelCase : Optional[Any] = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , __SCREAMING_SNAKE_CASE ) and yy.get('choices' , __SCREAMING_SNAKE_CASE ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](__SCREAMING_SNAKE_CASE ) , yy['type'](__SCREAMING_SNAKE_CASE ) )
del xx["type"], yy["type"]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Any ) -> Tuple:
__lowerCamelCase : Optional[Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('--bar' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('--baz' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('--flag' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='?' )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : Dict = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((__lowerCamelCase ) , ) : Any = parser.parse_args_into_dataclasses(__SCREAMING_SNAKE_CASE , look_for_args_file=__SCREAMING_SNAKE_CASE )
self.assertFalse(example.flag )
    def test_with_default(self ):
        parser = HfArgumentParser(WithDefaultExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=42 , type=int )
        expected.add_argument('--baz' , default='toto' , type=str , help='help message' )
        self.argparsersEqual(parser , expected )
    def test_with_default_bool(self ):
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=string_to_bool , default=False , const=True , nargs='?' )
        expected.add_argument('--baz' , type=string_to_bool , default=True , const=True , nargs='?' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('--no_baz' , action='store_false' , default=False , dest='baz' )
        expected.add_argument('--opt' , type=string_to_bool , default=None )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(['--foo', '--no_baz'] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(['--foo', '--baz'] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
    def test_with_enum(self ):
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        enum_ex = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def test_with_literal(self ):
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(WithLiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        args = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        args = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
    def test_with_list(self ):
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo_int' , nargs='+' , default=[] , type=int )
        expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=int )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=str )
        expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
    def test_with_optional(self ):
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=None , type=int )
        expected.add_argument('--bar' , default=None , type=float , help='help message' )
        expected.add_argument('--baz' , default=None , type=str )
        expected.add_argument('--ces' , nargs='+' , default=[] , type=str )
        expected.add_argument('--des' , nargs='+' , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
    def test_with_required(self ):
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--required_list' , nargs='+' , type=int , required=True )
        expected.add_argument('--required_str' , type=str , required=True )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=True , )
        self.argparsersEqual(parser , expected )
    def test_with_string_literal_annotation(self ):
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=int , required=True )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=True , )
        expected.add_argument('--opt' , type=string_to_bool , default=None )
        expected.add_argument('--baz' , default='toto' , type=str , help='help message' )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=str )
        self.argparsersEqual(parser , expected )
    def test_parse_dict(self ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )
    def test_parse_dict_extra_key(self ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
            'extra': 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def test_parse_json(self ):
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , 'temp_json' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '.json' , 'w+' ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
            args = BasicExample(**args_dict_for_json )
            self.assertEqual(parsed_args , args )
    def test_parse_yaml(self ):
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , 'temp_yaml' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '.yaml' , 'w+' ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
            args = BasicExample(**args_dict_for_yaml )
            self.assertEqual(parsed_args , args )
    def test_integration_training_args(self ):
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
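
# For orientation, a minimal sketch of the pattern the tests above exercise:
# HfArgumentParser builds an argparse CLI directly from a dataclass. The
# dataclass and argv below are illustrative, not taken from the test suite.

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class DemoArgs:
    learning_rate: float = field(default=3e-5, metadata={"help": "initial learning rate"})
    num_epochs: int = 3
    flag: bool = False


parser = HfArgumentParser(DemoArgs)
# One populated dataclass instance is returned per dataclass given to the parser.
(demo_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--flag"])
assert demo_args.num_epochs == 3 and demo_args.flag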
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase__ : Any = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict ) -> None:
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k , None )
lowercase__ : List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb ) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str , root: str = "." ) -> bytes:
    # NOTE: the `root` default (current directory) is an assumption so the
    # one-argument call in the conversion function below works.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('''/''' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"{download_target} exists and is not a regular file" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , '''rb''' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(url ) as source, open(download_target , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ) -> None:
    if ".pt" not in checkpoint_path:
        # the download helper returns raw bytes, so deserialize them in memory
        model_bytes = _download(_MODELS[checkpoint_path] )
        original_checkpoint = torch.load(io.BytesIO(model_bytes ) , map_location='''cpu''' )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
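
# As a quick sanity check of the key-renaming logic above, here is the mapping
# applied to a single fabricated checkpoint key (the key is illustrative):
toy_state = {"decoder.blocks.0.mlp.0.weight": None}
renamed = rename_keys(dict(toy_state))
# 'blocks' -> 'layers' and 'mlp.0' -> 'fc1', so the key becomes:
assert "decoder.layers.0.fc1.weight" in renamed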
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='''size''' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
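
# A minimal usage sketch of the processor above on a synthetic image (the random
# image is illustrative; with default settings the output is a 224x224 crop):
import numpy as np
import PIL.Image

processor = CLIPImageProcessor()
image = PIL.Image.fromarray(np.uint8(np.random.rand(480, 640, 3) * 255))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)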
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig ):
    model_type = "xlm-roberta"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
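
# A short sketch tying the two classes together: build a default config and
# read the dynamic ONNX input axes defined above.
config = XLMRobertaConfig()
onnx_config = XLMRobertaOnnxConfig(config, task="default")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])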
def knapsack(weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    """Recursive 0/1 knapsack: best value using items from `index` onward."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
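
# A quick worked example for the function above: with weights [1, 2, 4, 5],
# values [5, 4, 8, 6] and capacity 5, taking items 0 and 2 (weight 1 + 4)
# yields the optimum of 5 + 8 = 13.
print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))  # 13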
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def __magic_name__ ( self ) -> str:
__magic_name__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Any = FocalNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : int = self.type_sequence_label_size
__magic_name__ : Tuple = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> int:
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
__magic_name__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ) -> None:
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def __magic_name__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> List[str]:
return
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : str = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# FocalNet has a different seq_length
__magic_name__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
__magic_name__ : Union[str, Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__magic_name__ : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = 3
__magic_name__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : str = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = FocalNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
        # TODO update organization
        return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.default_image_processor
__magic_name__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__magic_name__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self ) -> None:
        self.model_tester = FocalNetModelTester(self )
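
# For reference, the forward pass performed by the integration test above,
# reduced to a standalone sketch (checkpoint and image path are the ones the
# test itself uses):
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # expected class index: 281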
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter ):
    @staticmethod
    def _should_log(main_process_only ):
        """Check whether the current process should emit the log record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self , level , msg , *args , **kwargs ):
        """Delegate to the underlying logger, filtered by process."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' , True )
        in_order = kwargs.pop('in_order' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg, kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()


def get_logger(name: str , log_level: str = None ):
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
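
# A minimal sketch of using the helpers above inside an accelerate script
# (the messages are illustrative; `Accelerator()` initializes the
# `PartialState` the adapter requires):
from accelerate import Accelerator

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, from the main process only")
logger.info("logged by every process, rank by rank", main_process_only=False, in_order=True)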
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
_UpperCAmelCase = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
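
# A brief sketch of the special-token layout the methods above implement
# (the checkpoint name comes from the pretrained maps above):
tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("first sentence", "second one")
# token_type_ids are 0 over [CLS] first [SEP] and 1 over second [SEP]
print(enc["token_type_ids"])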
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects ):
    """Set each passed object to None and flush the accelerator caches."""
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception ) -> bool:
    """Return True if the exception looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size(function: callable = None , starting_batch_size: int = 128 ):
    """Decorator that retries `function` with a halved batch size on OOM."""
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size

    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ", ".join([f"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                f"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero." )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
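
# A usage sketch of the decorator above: the wrapped function takes the batch
# size as its first argument and the caller must not pass one. The OOM below
# is simulated, purely for illustration.
@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:  # stand-in for a real CUDA OOM on large batches
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert train() == 16  # 64 -> 32 -> 16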
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig ):
    model_type = '''beit'''
    def __init__( self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
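
# A short sketch instantiating the config above and reading the ONNX export
# settings it exposes:
config = BeitConfig(image_size=384)
onnx_config = BeitOnnxConfig(config, task="default")
print(list(onnx_config.inputs))         # ['pixel_values']
print(onnx_config.atol_for_validation)  # 0.0001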
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float , accuracy: int = 18 , rounded_values_count: int = 10 ) -> float:
    """Approximate sin of an angle in degrees via its Maclaurin series."""
    # Simplify the angle to be between -360 and 360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("""doctest""").testmod()
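
# Two quick checks of the Maclaurin approximation above:
print(sin(90.0))   # ~1.0
print(sin(270.0))  # ~-1.0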
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int ) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5 )
    return number == sq * sq
def add_three(
    x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    """Add x/x_den, y/y_den and z/z_den and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35 ) -> int:
    """Collect reduced fraction sums for n in {1, 2, -1, -2} up to `order`."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
| 289 | 0 |
"""simple docstring"""
def snake_case_ ( A_ : int ):
'''simple docstring'''
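    # Build the digit-reversal of ``num`` and compare it with the original to detect a numeric palindrome.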
if num < 0:
return False
_lowerCamelCase : int = num
_lowerCamelCase : int = 0
while num > 0:
_lowerCamelCase : Optional[Any] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = '▁'
lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Tuple =BigBirdTokenizer
UpperCamelCase__ : Union[str, Any] =BigBirdTokenizerFast
UpperCamelCase__ : Any =True
UpperCamelCase__ : Optional[Any] =True
def lowerCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
_lowerCamelCase : List[Any] =self.tokenizer_class(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_lowerCamelCase : List[Any] ='<s>'
_lowerCamelCase : Optional[Any] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def lowerCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(lowercase_ ) , 1004 )
def lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase : Union[str, Any] =self.get_tokenizer()
_lowerCamelCase : int =self.get_rust_tokenizer()
_lowerCamelCase : int ='I was born in 92000, and this is falsé.'
_lowerCamelCase : int =tokenizer.tokenize(lowercase_ )
_lowerCamelCase : List[Any] =rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Any =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_lowerCamelCase : str =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : str =self.get_rust_tokenizer()
_lowerCamelCase : Union[str, Any] =tokenizer.encode(lowercase_ )
_lowerCamelCase : List[Any] =rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : str =BigBirdTokenizer(lowercase_ , keep_accents=lowercase_ )
_lowerCamelCase : int =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_lowerCamelCase : Optional[Any] =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : Any =tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCamelCase : Optional[int] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowerCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_lowerCamelCase : List[str] ='Hello World!'
_lowerCamelCase : Tuple =[65, 1_8536, 2260, 101, 66]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def lowerCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : int =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
_lowerCamelCase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_lowerCamelCase : Union[str, Any] =list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowerCamelCase : List[Any] =' '.join(lowercase_ )
_lowerCamelCase : List[str] =self.big_tokenizer.encode_plus(lowercase_ , return_tensors='pt' , return_token_type_ids=lowercase_ )
_lowerCamelCase : Optional[int] =self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=lowercase_ )
_lowerCamelCase : List[str] =BigBirdConfig(attention_type='original_full' )
_lowerCamelCase : Optional[Any] =BigBirdModel(lowercase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Dict =BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
_lowerCamelCase : int =tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] ={'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 199 | 0 |
"""simple docstring"""
def __lowerCamelCase ( a_ : int ) -> list[int]:
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
__SCREAMING_SNAKE_CASE :Dict = [True] * (num + 1)
__SCREAMING_SNAKE_CASE :Optional[Any] = 2
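    # Sieve of Eratosthenes: for each prime p still marked True, cross out p*p, p*p + p, p*p + 2p, ...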
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
__SCREAMING_SNAKE_CASE :Optional[Any] = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
 | 239 |
"""simple docstring"""
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] ) -> Union[str, Any]:
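    # Squared Euclidean distance between two 2-D points; the sqrt is deferred until the final answer.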
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __lowerCamelCase ( a_ : Optional[int] , a_ : Any=0 ) -> Optional[Any]:
return sorted(a_ , key=lambda a_ : x[column] )
def __lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : str=float('''inf''' ) ) -> str:
for i in range(points_counts - 1 ):
for j in range(i + 1 , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :Optional[Any] = current_dis
return min_dis
def __lowerCamelCase ( a_ : List[Any] , a_ : Any , a_ : Optional[int]=float('''inf''' ) ) -> Optional[Any]:
for i in range(min(6 , points_counts - 1 ) , a_ ):
for j in range(max(0 , i - 6 ) , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :int = current_dis
return min_dis
def __lowerCamelCase ( a_ : str , a_ : List[Any] , a_ : int ) -> Optional[int]:
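    # Divide and conquer: split the x-sorted points at the median, recurse on both halves,
    # then check the strip of points within the best distance of the dividing line.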
# base case
if points_counts <= 3:
return dis_between_closest_pair(a_ , a_ )
# recursion
__SCREAMING_SNAKE_CASE :int = points_counts // 2
__SCREAMING_SNAKE_CASE :Dict = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[:mid] , a_ )
__SCREAMING_SNAKE_CASE :Any = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[mid:] , points_counts - mid )
__SCREAMING_SNAKE_CASE :Union[str, Any] = min(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(a_ )
__SCREAMING_SNAKE_CASE :Dict = dis_between_closest_in_strip(
a_ , len(a_ ) , a_ )
return min(a_ , a_ )
def __lowerCamelCase ( a_ : int , a_ : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = column_based_sort(a_ , column=0 )
__SCREAMING_SNAKE_CASE :int = column_based_sort(a_ , column=1 )
return (
closest_pair_of_points_sqr(
a_ , a_ , a_ )
) ** 0.5
if __name__ == "__main__":
lowerCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points))) | 239 | 1 |
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
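    # Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b) until b reaches zero.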
while b:
        lowercase , lowercase = b, a % b
return a
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
return a if b == 0 else euclidean_gcd_recursive(snake_case__ , a % b )
def _snake_case( ) -> str:
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 20 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[str] = "codegen"
UpperCAmelCase__ : str = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, SCREAMING_SNAKE_CASE_=5_0400, SCREAMING_SNAKE_CASE_=2048, SCREAMING_SNAKE_CASE_=2048, SCREAMING_SNAKE_CASE_=4096, SCREAMING_SNAKE_CASE_=28, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="gelu_new", SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> Tuple:
UpperCamelCase : Tuple = vocab_size
UpperCamelCase : Optional[int] = n_ctx
UpperCamelCase : Optional[int] = n_positions
UpperCamelCase : List[str] = n_embd
UpperCamelCase : Dict = n_layer
UpperCamelCase : int = n_head
UpperCamelCase : Union[str, Any] = n_inner
UpperCamelCase : int = rotary_dim
UpperCamelCase : Optional[Any] = activation_function
UpperCamelCase : Optional[int] = resid_pdrop
UpperCamelCase : Union[str, Any] = embd_pdrop
UpperCamelCase : Optional[Any] = attn_pdrop
UpperCamelCase : List[str] = layer_norm_epsilon
UpperCamelCase : Union[str, Any] = initializer_range
UpperCamelCase : str = use_cache
UpperCamelCase : Dict = bos_token_id
UpperCamelCase : Union[str, Any] = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, tie_word_embeddings=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "default", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_, task=SCREAMING_SNAKE_CASE_, patching_specs=SCREAMING_SNAKE_CASE_, use_past=SCREAMING_SNAKE_CASE_ )
if not getattr(self._config, 'pad_token_id', SCREAMING_SNAKE_CASE_ ):
# TODO: how to do that better?
UpperCamelCase : str = 0
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase : Tuple = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_, direction='inputs' )
UpperCamelCase : List[Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
UpperCamelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def snake_case_ ( self ) -> int:
return self._config.n_layer
@property
def snake_case_ ( self ) -> int:
return self._config.n_head
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = -1, SCREAMING_SNAKE_CASE_ = -1, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, ) -> Mapping[str, Any]:
UpperCamelCase : Tuple = super(SCREAMING_SNAKE_CASE_, self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE_, batch_size=SCREAMING_SNAKE_CASE_, seq_length=SCREAMING_SNAKE_CASE_, is_pair=SCREAMING_SNAKE_CASE_, framework=SCREAMING_SNAKE_CASE_ )
        # We need to order the inputs in the way they appear in forward()
UpperCamelCase : Optional[int] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCamelCase , UpperCamelCase : List[str] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCamelCase : List[Any] = seqlen + 2
UpperCamelCase : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
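                # One (key, value) pair of zero tensors per decoder layer, each shaped
                # (batch, num_heads, past_sequence_length, head_dim).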
UpperCamelCase : str = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers )
]
UpperCamelCase : List[Any] = common_inputs['attention_mask']
if self.use_past:
UpperCamelCase : Optional[Any] = ordered_inputs['attention_mask'].dtype
UpperCamelCase : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_ )], dim=1 )
return ordered_inputs
@property
def snake_case_ ( self ) -> int:
return 13
| 119 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __magic_name__ ( lowerCAmelCase ):
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : str =tempfile.mkdtemp()
_UpperCAmelCase : Optional[int] =5
# Realm tok
_UpperCAmelCase : Optional[Any] =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_UpperCAmelCase : int =os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(snake_case , exist_ok=snake_case)
_UpperCAmelCase : List[str] =os.path.join(snake_case , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
_UpperCAmelCase : int =os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(snake_case , exist_ok=snake_case)
def lowerCAmelCase ( self) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =RealmConfig(num_block_records=self.num_block_records)
return config
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : str =Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] =np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=snake_case , )
return block_records
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int =RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] =self.get_config()
_UpperCAmelCase : Optional[Any] =self.get_dummy_retriever()
_UpperCAmelCase : str =retriever.tokenizer
_UpperCAmelCase : Any =np.array([0, 3] , dtype='long')
_UpperCAmelCase : Any =tokenizer(['Test question']).input_ids
_UpperCAmelCase : str =tokenizer(
['the fourth'] , add_special_tokens=snake_case , return_token_type_ids=snake_case , return_attention_mask=snake_case , ).input_ids
_UpperCAmelCase : Dict =config.reader_seq_len
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] =retriever(
snake_case , snake_case , answer_ids=snake_case , max_length=snake_case , return_tensors='np')
self.assertEqual(len(snake_case) , 2)
self.assertEqual(len(snake_case) , 2)
self.assertEqual(len(snake_case) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Any =self.get_config()
_UpperCAmelCase : Optional[int] =self.get_dummy_retriever()
_UpperCAmelCase : List[str] =retriever.tokenizer
_UpperCAmelCase : List[Any] =np.array([0, 3, 5] , dtype='long')
_UpperCAmelCase : Tuple =tokenizer(['Test question']).input_ids
_UpperCAmelCase : Union[str, Any] =tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=snake_case , return_token_type_ids=snake_case , return_attention_mask=snake_case , ).input_ids
_UpperCAmelCase : Union[str, Any] =config.reader_seq_len
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =retriever(
snake_case , snake_case , answer_ids=snake_case , max_length=snake_case , return_tensors='np')
self.assertEqual([False, True, True] , snake_case)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict =self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
_UpperCAmelCase : Tuple =retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , B'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
_UpperCAmelCase : Dict =os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
_UpperCAmelCase : List[str] =RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , B'This is the first record')
| 242 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
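# Lazy import structure: the heavy PyTorch / TensorFlow modules below are only imported on first attribute access.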
lowercase ={
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =[
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =[
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowercase =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 242 | 1 |
import math
def UpperCAmelCase__ ():
"""simple docstring"""
snake_case = input('''Enter message: ''' )
snake_case = int(input(F'''Enter key [2-{len(UpperCamelCase_ ) - 1}]: ''' ) )
snake_case = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
snake_case = encrypt_message(UpperCamelCase_ ,UpperCamelCase_ )
elif mode.lower().startswith('''d''' ):
snake_case = decrypt_message(UpperCamelCase_ ,UpperCamelCase_ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F'''Output:\n{text + "|"}''' )
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = [''''''] * key
for col in range(UpperCamelCase_ ):
snake_case = col
while pointer < len(UpperCamelCase_ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(UpperCamelCase_ )
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = math.ceil(len(UpperCamelCase_ ) / key )
snake_case = key
snake_case = (num_cols * num_rows) - len(UpperCamelCase_ )
snake_case = [''''''] * num_cols
snake_case = 0
snake_case = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
snake_case = 0
row += 1
return "".join(UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 127 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_SCREAMING_SNAKE_CASE : List[str] = "hf-internal-testing/tiny-random-bert"
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
_SCREAMING_SNAKE_CASE : Optional[int] = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
snake_case = cached_file(__snake_case , __snake_case )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__snake_case ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__snake_case , __snake_case ) ) )
with open(os.path.join(__snake_case , '''refs''' , '''main''' ) ) as f:
snake_case = f.read()
self.assertEqual(__snake_case , os.path.join(__snake_case , '''snapshots''' , __snake_case , __snake_case ) )
self.assertTrue(os.path.isfile(__snake_case ) )
# File is cached at the same place the second time.
snake_case = cached_file(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
# Using a specific revision to test the full commit hash.
snake_case = cached_file(__snake_case , __snake_case , revision='''9b8c223''' )
self.assertEqual(__snake_case , os.path.join(__snake_case , '''snapshots''' , __snake_case , __snake_case ) )
def a_ ( self ):
with self.assertRaisesRegex(__snake_case , '''is not a valid model identifier''' ):
snake_case = cached_file('''tiny-random-bert''' , __snake_case )
with self.assertRaisesRegex(__snake_case , '''is not a valid git identifier''' ):
snake_case = cached_file(__snake_case , __snake_case , revision='''aaaa''' )
with self.assertRaisesRegex(__snake_case , '''does not appear to have a file named''' ):
snake_case = cached_file(__snake_case , '''conf''' )
def a_ ( self ):
with self.assertRaisesRegex(__snake_case , '''does not appear to have a file named''' ):
snake_case = cached_file(__snake_case , '''conf''' )
with open(os.path.join(__snake_case , '''refs''' , '''main''' ) ) as f:
snake_case = f.read()
self.assertTrue(os.path.isfile(os.path.join(__snake_case , '''.no_exist''' , __snake_case , '''conf''' ) ) )
snake_case = cached_file(__snake_case , '''conf''' , _raise_exceptions_for_missing_entries=__snake_case )
self.assertIsNone(__snake_case )
snake_case = cached_file(__snake_case , '''conf''' , local_files_only=__snake_case , _raise_exceptions_for_missing_entries=__snake_case )
self.assertIsNone(__snake_case )
snake_case = mock.Mock()
snake_case = 5_0_0
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__snake_case ) as mock_head:
snake_case = cached_file(__snake_case , '''conf''' , _raise_exceptions_for_connection_errors=__snake_case )
self.assertIsNone(__snake_case )
            # Check that we did call the fake head request
mock_head.assert_called()
def a_ ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
def a_ ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__snake_case , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , __snake_case )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__snake_case , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , __snake_case , revision='''ahaha''' )
snake_case = get_file_from_repo('''bert-base-cased''' , __snake_case )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case = json.loads(open(__snake_case , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_6_8 )
def a_ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = Path(__snake_case ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(__snake_case , '''a.txt''' ) , str(__snake_case ) )
self.assertIsNone(get_file_from_repo(__snake_case , '''b.txt''' ) )
| 127 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 42
lowercase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 353 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__A : Tuple = "sshleifer/mar_enro_6_3_student"
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __lowercase ( self : List[Any] ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ : Any = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=lowerCamelCase , )
lowerCAmelCase_ : Optional[Any] = F'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def __lowercase ( self : str ) -> str:
MarianMTModel.from_pretrained(lowerCamelCase )
@slow
@require_torch_gpu
def __lowercase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase_ : str = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
lowerCAmelCase_ : Dict = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
lowerCAmelCase_ : Optional[int] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
lowerCAmelCase_ : Optional[int] = bash_script.replace(lowerCamelCase , str(lowerCamelCase ) )
lowerCAmelCase_ : Optional[Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowerCAmelCase_ : Tuple = F'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowerCAmelCase_ : Tuple = ["""finetune.py"""] + bash_script.split() + args
with patch.object(lowerCamelCase , """argv""" , lowerCamelCase ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
lowerCAmelCase_ : Any = pl.Trainer.add_argparse_args(lowerCamelCase )
lowerCAmelCase_ : List[str] = SummarizationModule.add_model_specific_args(lowerCamelCase , os.getcwd() )
lowerCAmelCase_ : Tuple = parser.parse_args()
lowerCAmelCase_ : Dict = main(lowerCamelCase )
# Check metrics
lowerCAmelCase_ : int = load_json(model.metrics_save_path )
lowerCAmelCase_ : Optional[Any] = metrics["""val"""][0]
lowerCAmelCase_ : Tuple = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , lowerCamelCase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase_ : Union[str, Any] = os.listdir(lowerCamelCase )
lowerCAmelCase_ : Any = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowerCAmelCase_ : Union[str, Any] = os.path.join(args.output_dir , lowerCamelCase )
lowerCAmelCase_ : int = torch.load(lowerCamelCase , map_location="""cpu""" )
lowerCAmelCase_ : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase_ : List[str] = {os.path.basename(lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
def __lowercase ( self : Optional[Any] ) -> Dict:
lowerCAmelCase_ : List[str] = F'{self.test_file_dir_str}/test_data/wmt_en_ro'
lowerCAmelCase_ : Dict = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 1_28,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
lowerCAmelCase_ : int = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
lowerCAmelCase_ : str = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
lowerCAmelCase_ : Tuple = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
lowerCAmelCase_ : Optional[int] = bash_script.replace(lowerCamelCase , str(lowerCamelCase ) )
lowerCAmelCase_ : int = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Optional[Any] = bash_script.replace("""--fp16""" , """""" )
lowerCAmelCase_ : Dict = 6
lowerCAmelCase_ : List[Any] = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'--output_dir={output_dir}',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'--num_train_epochs={epochs}',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(lowerCamelCase , """argv""" , lowerCamelCase ):
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
lowerCAmelCase_ : int = pl.Trainer.add_argparse_args(lowerCamelCase )
lowerCAmelCase_ : List[str] = SummarizationDistiller.add_model_specific_args(lowerCamelCase , os.getcwd() )
lowerCAmelCase_ : List[Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowerCAmelCase_ : str = distill_main(lowerCamelCase )
# Check metrics
lowerCAmelCase_ : Union[str, Any] = load_json(model.metrics_save_path )
lowerCAmelCase_ : Union[str, Any] = metrics["""val"""][0]
lowerCAmelCase_ : Union[str, Any] = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , lowerCamelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase_ : Union[str, Any] = os.listdir(lowerCamelCase )
lowerCAmelCase_ : Dict = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowerCAmelCase_ : Optional[int] = os.path.join(args.output_dir , lowerCamelCase )
lowerCAmelCase_ : int = torch.load(lowerCamelCase , map_location="""cpu""" )
lowerCAmelCase_ : Tuple = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase_ : Union[str, Any] = {os.path.basename(lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 89 | 0 |
'''simple docstring'''
def lowercase__ ( __lowercase : list[int] ) -> int:
"""simple docstring"""
if not numbers:
return 0
if not isinstance(__lowercase , (list, tuple) ) or not all(
isinstance(__lowercase , __lowercase ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
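    # Kadane-style scan tracking both the largest and smallest products ending at each index,
    # because multiplying by a negative number swaps which extreme becomes the maximum.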
__UpperCamelCase = __UpperCamelCase = __UpperCamelCase = numbers[0]
for i in range(1 , len(__lowercase ) ):
# update the maximum and minimum subarray products
__UpperCamelCase = numbers[i]
if number < 0:
__UpperCamelCase , __UpperCamelCase = min_till_now, max_till_now
__UpperCamelCase = max(__lowercase , max_till_now * number )
__UpperCamelCase = min(__lowercase , min_till_now * number )
# update the maximum product found till now
__UpperCamelCase = max(__lowercase , __lowercase )
return max_prod
| 53 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowercase__ ( __lowercase : Features ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = np.inf
def set_batch_size(__lowercase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__lowercase , __lowercase ):
__UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__lowercase , __lowercase ):
__UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary":
__UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__lowercase , __lowercase )
return None if batch_size is np.inf else batch_size
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ):
super().__init__(
__A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
__UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths}
__UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1]
__UpperCamelCase = Parquet(
cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , )
def _lowerCamelCase ( self : Optional[int] ):
# Build iterable dataset
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__A , in_memory=self.keep_in_memory )
return dataset
class snake_case :
"""simple docstring"""
def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ):
__UpperCamelCase = dataset
__UpperCamelCase = path_or_buf
__UpperCamelCase = batch_size or get_writer_batch_size(dataset.features )
__UpperCamelCase = parquet_writer_kwargs
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs )
else:
__UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs )
return written
def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ):
__UpperCamelCase = 0
__UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A )
__UpperCamelCase = self.dataset.features.arrow_schema
__UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , __A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__UpperCamelCase = query_table(
table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__A )
written += batch.nbytes
writer.close()
return written
| 53 | 1 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = """▁"""
lowercase__ : str = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
lowercase__ : str = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
lowercase__ : int = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
lowercase__ : Union[str, Any] = {
"""ernie-m-base""": 5_1_4,
"""ernie-m-large""": 5_1_4,
}
lowercase__ : Optional[int] = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""input_ids"""]
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = RESOURCE_FILES_NAMES
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : List[str]="utf8" , SCREAMING_SNAKE_CASE_ : Optional[int]="[UNK]" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="[SEP]" , SCREAMING_SNAKE_CASE_ : int="[PAD]" , SCREAMING_SNAKE_CASE_ : List[Any]="[CLS]" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="[MASK]" , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
lowerCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , vocab_file=lowercase_ , encoding=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
lowerCAmelCase_ : Any = do_lower_case
lowerCAmelCase_ : Union[str, Any] = sentencepiece_model_ckpt
lowerCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase_ : Union[str, Any] = self.load_vocab(filepath=lowercase_ )
else:
lowerCAmelCase_ : Any = {self.sp_model.id_to_piece(lowercase_ ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Dict ):
if text is None:
return None
lowerCAmelCase_ : str = self.tokenize(lowercase_ )
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = '', []
for i, ch in enumerate(lowercase_ ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase_ : Union[str, Any] = self.SP_CHAR_MAPPING.get(lowercase_ )
else:
lowerCAmelCase_ : int = unicodedata.normalize('NFKC' , lowercase_ )
if self.is_whitespace(lowercase_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(lowercase_ ) )
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase_ : List[Any] = token[1:]
lowerCAmelCase_ : Tuple = text[offset:].index(lowercase_ ) + offset
lowerCAmelCase_ : Dict = start + len(lowercase_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase_ : int = end
return token_mapping
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
return len(self.vocab )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Dict ):
lowerCAmelCase_ : Tuple = self.__dict__.copy()
lowerCAmelCase_ : Union[str, Any] = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ):
return "".join((self.SP_CHAR_MAPPING.get(lowercase_ , lowercase_ ) for c in text) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : List[Any]=6_4 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 ):
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase_ : Union[str, Any] = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase_ : List[str] = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase_ : Optional[int] = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(lowercase_ )
else:
lowerCAmelCase_ : Optional[Any] = self.sp_model.SampleEncodeAsPieces(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ : Optional[Any] = []
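        # Re-split the sentencepiece output so that CJK characters, punctuation marks and digit runs
        # each become standalone pieces.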
for pi, piece in enumerate(lowercase_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(lowercase_ ) and pi != 0:
new_pieces.append(lowercase_ )
continue
else:
continue
lowerCAmelCase_ : str = 0
for i, chunk in enumerate(lowercase_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(lowercase_ ) or self.is_punct(lowercase_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(lowercase_ )
lowerCAmelCase_ : Any = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase_ : Optional[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase_ : List[str] = i
if len(lowercase_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : Dict = ''.join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : int = self.convert_ids_to_tokens(lowercase_ )
lowerCAmelCase_ : Any = ''.join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] ):
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ):
return self.reverse_vocab.get(lowercase_ , self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : int = [self.cls_token_id]
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : List[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowercase_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowercase_ ) + 1) + [1] * (len(lowercase_ ) + 3)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ):
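        # U+4E00..U+9FFF is the CJK Unified Ideographs block.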
if "\u4e00" <= char <= "\u9fff":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowercase_ ) == 1:
lowerCAmelCase_ : Any = unicodedata.category(lowercase_ )
if cat == "Zs":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : Optional[int] = {}
with io.open(lowercase_ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(lowercase_ ):
lowerCAmelCase_ : str = line.rstrip('\n' )
lowerCAmelCase_ : str = int(lowercase_ )
return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 369 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16,
        hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
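# A minimal usage sketch (added; the test-file path below is an assumption, not
# from the original): both suites above are plain unittest cases, so with the
# transformers test harness installed they can be run with, e.g.,
#   python -m pytest tests/models/albert/test_modeling_albert.py -k "integration"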
| 289 | 0 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
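# Worked check (illustrative comment, not in the original file): for the sample
# [1, 1, 0, 0] and the initial weights used in main() below, the squared distances
# are d0 = 0.8^2 + 0.4^2 + 0.5^2 + 0.9^2 = 1.86 and
# d1 = 0.2^2 + 0.6^2 + 0.7^2 + 0.3^2 = 0.98, so get_winner returns cluster 1.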
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 125 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients are stored from the lowest degree term to the highest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
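# A minimal usage sketch (added; not part of the original file):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients from lowest degree: 3x^2 + 2x + 1
    q = Polynomial(1, [1, 1])  # x + 1
    print(p + q)  # 3x^2 + 3x + 2
    print(p.evaluate(2))  # 1 + 2*2 + 3*4 = 17
    print(p.derivative())  # 6x + 2
    print(p.integral(constant=5))  # 1.0x^3 + 1.0x^2 + 1.0x + 5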
| 125 | 1 |
"""simple docstring"""
__A = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__A = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__A = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 254 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__A = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 254 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class TaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100,
        additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, legacy=True, **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_b is None:
            return ([0] * len(token_ids_a)) + [1]
        return ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_b + eos) * [0]
    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_a = self._add_eos_if_not_present(token_ids_a)
        if token_ids_b is None:
            return token_ids_a
        else:
            token_ids_b = self._add_eos_if_not_present(token_ids_b)
            return token_ids_a + token_ids_b
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace SPIECE_UNDERLINE with a space so that it is only used at the
        # beginning of the text in non-legacy mode.
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
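# A minimal usage sketch (added; not in the original file -- it assumes the
# "t5-small" checkpoint is reachable on the Hugging Face Hub):
#
#   tok = TaTokenizer.from_pretrained("t5-small")
#   ids = tok("translate English to German: hello").input_ids
#   print(tok.convert_ids_to_tokens(ids))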
| 339 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a_ :Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a_ :List[str] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a_ :List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions,
        references,
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
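# Worked illustration (added comment, not part of the metric): GLEU is the minimum
# of n-gram precision and recall. With min_len=1 and max_len=2, the hypothesis
# ["the", "cat", "sat"] against the reference ["the", "cat", "sat", "down"] has
# 5 hypothesis n-grams (3 unigrams + 2 bigrams), all of which match, out of 7
# reference n-grams, so precision = 5/5, recall = 5/7, and GLEU = min(1.0, 5/7) ≈ 0.714.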
| 277 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
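# A minimal usage sketch (added; not in the original file -- assumes Hub access):
# source and target languages are selected via FLORES-200 codes.
#
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")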
| 352 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

| 303 | 0 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 176 |
import os


def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution())
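# Worked check (added comment): this is Project Euler problem 22. For example,
# "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53, and since COLIN is the 938th name in
# the sorted list, it contributes 938 * 53 = 49714 to the total.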
| 176 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )

    return config
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


# the patch merging ("downsample") weights are stored with a different channel
# unfold order in the original checkpoints than in the HF implementation; these
# helpers reorder the channels between the two conventions
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
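

# Minimal round-trip check (illustrative, not part of the original script): each
# reverse_* helper should undo its forward counterpart exactly, since both only
# permute elements. The shapes below are hypothetical; the channel dimension just
# needs to be divisible by 4.
def _check_unfold_round_trip():
    import torch

    reduction = torch.randn(192, 384)  # (out_channel, in_channel) of a reduction weight
    assert torch.equal(correct_unfold_reduction_order(reverse_correct_unfold_reduction_order(reduction)), reduction)
    norm = torch.randn(384)  # a downsample norm weight
    assert torch.equal(correct_unfold_norm_order(reverse_correct_unfold_norm_order(norm)), norm)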


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" by "batch_norm" in the checkpoint keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
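# Example invocation (illustrative; the script filename is assumed, not given above):
#   python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny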
# Eulerian Path is a path in a graph that visits every edge exactly once.
# Eulerian Circuit is an Eulerian Path that starts and ends on the same vertex.
# time complexity is O(V + E)
# space complexity is O(VE)


# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
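

# Tiny worked example (illustrative, not part of the original module): starting
# from vertex 1 of a triangle, dfs walks every edge exactly once and returns the
# closed trail [1, 2, 3, 1].
def _dfs_example():
    graph = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    visited_edge = [[False] * 4 for _ in range(4)]
    assert dfs(1, graph, visited_edge) == [1, 2, 3, 1]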


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has an Euler path")
    if check == 1:
        print("graph has an Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
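

# Minimal sketch (illustrative): the status codes returned above are 1 for an
# Eulerian circuit, 2 for an Eulerian path, and 3 for neither. A single edge has
# exactly two odd-degree vertices, hence status 2.
def _status_example():
    status, odd_node = check_circuit_or_path({1: [2], 2: [1]}, 3)
    assert (status, odd_node) == (2, 2)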


if __name__ == "__main__":
    main()