import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values below vocab_size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
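
# Illustrative usage (not part of the original test file): ids_tensor((2, 8), vocab_size=99)
# yields a (2, 8) int32 array of token ids in [0, 98], and random_attention_mask((2, 8))
# yields a random 0/1 mask whose last column is forced to 1, so every row attends to at
# least one token.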
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut sequence to half length & cap the batch size at 2
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = None , A = PILImageResampling.BILINEAR , A = True , A = 1 / 2_5_5 , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : str = size if size is not None else {"""shortest_edge""": 3_8_4}
snake_case : Tuple = get_size_dict(A , default_to_square=A )
snake_case : Union[str, Any] = do_resize
snake_case : Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
snake_case : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
snake_case : str = resample
snake_case : Optional[int] = do_rescale
snake_case : Tuple = rescale_factor
snake_case : Tuple = do_normalize
snake_case : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Tuple = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
snake_case : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
snake_case : str = int(shortest_edge / crop_pct )
snake_case : Dict = get_resize_output_image_size(A , size=A , default_to_square=A )
snake_case : Optional[Any] = resize(image=A , size=A , resample=A , data_format=A , **A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=A , size=(shortest_edge, shortest_edge) , data_format=A , **A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
A , size=(shortest_edge, shortest_edge) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Union[str, Any]:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : List[str] = do_resize if do_resize is not None else self.do_resize
snake_case : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case : Dict = resample if resample is not None else self.resample
snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Tuple = do_normalize if do_normalize is not None else self.do_normalize
snake_case : Dict = image_mean if image_mean is not None else self.image_mean
snake_case : int = image_std if image_std is not None else self.image_std
snake_case : int = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A , default_to_square=A )
snake_case : Any = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : int = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : List[Any] = [self.resize(image=A , size=A , crop_pct=A , resample=A ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : Optional[int] = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : str = [to_channel_dimension_format(A , A ) for image in images]
snake_case : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` where `pattern` starts (brute force, O(len(s) * len(pattern)))."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
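    # Cross-check (added for illustration) against Python's built-in str.find,
    # which should locate the same occurrences one by one:
    text, pat = 'ABAAABCDBBABCDDEBCABC', 'ABC'
    expected = []
    start = text.find(pat)
    while start != -1:
        expected.append(start)
        start = text.find(pat, start + 1)
    assert naive_pattern_search(text, pat) == expected == [4, 10, 18]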
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = VideoToVideoSDPipeline
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_snake_case = PipelineTesterMixin.required_optional_params - {"""latents"""}
_snake_case = False
# No `output_type`.
_snake_case = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def UpperCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
snake_case : int = UNet3DConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=3_2 , attention_head_dim=4 , )
snake_case : Dict = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
snake_case : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case : str = CLIPTextModel(A )
snake_case : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCAmelCase ( self , A , A=0 ) -> str:
# 3 frames
snake_case : List[str] = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
if str(A ).startswith("""mps""" ):
snake_case : Any = torch.manual_seed(A )
else:
snake_case : Any = torch.Generator(device=A ).manual_seed(A )
snake_case : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCAmelCase ( self ) -> str:
snake_case : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Tuple = self.get_dummy_components()
snake_case : List[str] = VideoToVideoSDPipeline(**A )
snake_case : Any = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Tuple = self.get_dummy_inputs(A )
snake_case : int = """np"""
snake_case : Dict = sd_pipe(**A ).frames
snake_case : Optional[int] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
snake_case : int = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
def UpperCAmelCase ( self ) -> List[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Tuple = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.float16 )
pipe.enable_model_cpu_offload()
# 10 frames
snake_case : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Dict = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=A )
snake_case : Optional[Any] = video.to("""cuda""" )
snake_case : int = """Spiderman is surfing"""
snake_case : Optional[Any] = pipe(A , video=A , generator=A , num_inference_steps=3 , output_type="""pt""" ).frames
snake_case : str = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Hyperbolic tangent activation, via the identity tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
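    # Sanity check (added for illustration): the closed form above is algebraically
    # identical to numpy's built-in tanh, so the two should agree to float precision.
    sample = np.array([1.0, -0.5, 0.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))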
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Tuple = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_2_8, """min_length""": 1_2, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_4_2, """min_length""": 5_6, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 6_2, """min_length""": 1_1, """num_beams""": 6},
}
}
snake_case : Any = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_2_8,
"""task_specific_params.summarization.min_length""": 1_2,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_4_2,
"""task_specific_params.summarization_cnn.min_length""": 5_6,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 6_2,
"""task_specific_params.summarization_xsum.min_length""": 1_1,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(A ) , A )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(A ) , x.transpose() ) )
snake_case : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase ( self ) -> str:
snake_case : Optional[Any] = np.random.randn(3 , 4 )
snake_case : Dict = torch.tensor(A )
self.assertTrue(np.allclose(transpose(A ) , transpose(A ).numpy() ) )
snake_case : Dict = np.random.randn(3 , 4 , 5 )
snake_case : Any = torch.tensor(A )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , transpose(A , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[Any] = np.random.randn(3 , 4 )
snake_case : Dict = tf.constant(A )
self.assertTrue(np.allclose(transpose(A ) , transpose(A ).numpy() ) )
snake_case : str = np.random.randn(3 , 4 , 5 )
snake_case : List[Any] = tf.constant(A )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , transpose(A , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Any = np.random.randn(3 , 4 )
snake_case : Optional[int] = jnp.array(A )
self.assertTrue(np.allclose(transpose(A ) , np.asarray(transpose(A ) ) ) )
snake_case : Tuple = np.random.randn(3 , 4 , 5 )
snake_case : Optional[int] = jnp.array(A )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , np.asarray(transpose(A , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , np.reshape(A , (4, 3) ) ) )
snake_case : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(A , (1_2, 5) ) , np.reshape(A , (1_2, 5) ) ) )
@require_torch
def UpperCAmelCase ( self ) -> Dict:
snake_case : int = np.random.randn(3 , 4 )
snake_case : Dict = torch.tensor(A )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , reshape(A , (4, 3) ).numpy() ) )
snake_case : Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case : Any = torch.tensor(A )
self.assertTrue(np.allclose(reshape(A , (1_2, 5) ) , reshape(A , (1_2, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : int = np.random.randn(3 , 4 )
snake_case : List[str] = tf.constant(A )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , reshape(A , (4, 3) ).numpy() ) )
snake_case : Optional[int] = np.random.randn(3 , 4 , 5 )
snake_case : str = tf.constant(A )
self.assertTrue(np.allclose(reshape(A , (1_2, 5) ) , reshape(A , (1_2, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = np.random.randn(3 , 4 )
snake_case : Union[str, Any] = jnp.array(A )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , np.asarray(reshape(A , (4, 3) ) ) ) )
snake_case : Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case : Optional[int] = jnp.array(A )
self.assertTrue(np.allclose(reshape(A , (1_2, 5) ) , np.asarray(reshape(A , (1_2, 5) ) ) ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Optional[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(A ) , np.squeeze(A ) ) )
snake_case : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , np.squeeze(A , axis=2 ) ) )
@require_torch
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[Any] = np.random.randn(1 , 3 , 4 )
snake_case : Optional[Any] = torch.tensor(A )
self.assertTrue(np.allclose(squeeze(A ) , squeeze(A ).numpy() ) )
snake_case : List[str] = np.random.randn(1 , 4 , 1 , 5 )
snake_case : Tuple = torch.tensor(A )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , squeeze(A , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Union[str, Any] = np.random.randn(1 , 3 , 4 )
snake_case : List[Any] = tf.constant(A )
self.assertTrue(np.allclose(squeeze(A ) , squeeze(A ).numpy() ) )
snake_case : int = np.random.randn(1 , 4 , 1 , 5 )
snake_case : List[Any] = tf.constant(A )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , squeeze(A , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[int] = np.random.randn(1 , 3 , 4 )
snake_case : Union[str, Any] = jnp.array(A )
self.assertTrue(np.allclose(squeeze(A ) , np.asarray(squeeze(A ) ) ) )
snake_case : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case : Optional[int] = jnp.array(A )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , np.asarray(squeeze(A , axis=2 ) ) ) )
def UpperCAmelCase ( self ) -> int:
snake_case : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , np.expand_dims(A , axis=1 ) ) )
@require_torch
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : str = np.random.randn(3 , 4 )
snake_case : List[Any] = torch.tensor(A )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , expand_dims(A , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : str = np.random.randn(3 , 4 )
snake_case : Dict = tf.constant(A )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , expand_dims(A , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> Dict:
snake_case : str = np.random.randn(3 , 4 )
snake_case : List[str] = jnp.array(A )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , np.asarray(expand_dims(A , axis=1 ) ) ) )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
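
# Illustrative note (not part of the original file): with the lazy structure above,
# importing the package is cheap; the heavy torch/TF modeling files are only imported
# when an attribute is first accessed, e.g.:
#
#   from transformers.models import vit_mae
#   model_cls = vit_mae.ViTMAEModel  # triggers the real import of modeling_vit_mae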
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCamelCase : int = logging.get_logger(__name__)
class __lowercase :
"""simple docstring"""
def __init__( self , A , A ) -> str:
snake_case : Dict = question_encoder
snake_case : Optional[Any] = generator
snake_case : Tuple = self.question_encoder
def UpperCAmelCase ( self , A ) -> Optional[int]:
if os.path.isfile(A ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(A , exist_ok=A )
snake_case : Tuple = os.path.join(A , """question_encoder_tokenizer""" )
snake_case : List[Any] = os.path.join(A , """generator_tokenizer""" )
self.question_encoder.save_pretrained(A )
self.generator.save_pretrained(A )
@classmethod
def UpperCAmelCase ( cls , A , **A ) -> Any:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
snake_case : int = kwargs.pop("""config""" , A )
if config is None:
snake_case : List[str] = RagConfig.from_pretrained(A )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(
A , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
snake_case : Optional[int] = AutoTokenizer.from_pretrained(
A , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=A , generator=A )
def __call__( self , *A , **A ) -> str:
return self.current_tokenizer(*A , **A )
def UpperCAmelCase ( self , *A , **A ) -> Dict:
return self.generator.batch_decode(*A , **A )
def UpperCAmelCase ( self , *A , **A ) -> List[str]:
return self.generator.decode(*A , **A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[Any] = self.question_encoder
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : int = self.generator
def UpperCAmelCase ( self , A , A = None , A = None , A = None , A = "longest" , A = None , A = True , **A , ) -> BatchEncoding:
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , A , )
if max_length is None:
snake_case : List[str] = self.current_tokenizer.model_max_length
snake_case : List[Any] = self(
A , add_special_tokens=A , return_tensors=A , max_length=A , padding=A , truncation=A , **A , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case : Optional[Any] = self.current_tokenizer.model_max_length
snake_case : Dict = self(
text_target=A , add_special_tokens=A , return_tensors=A , padding=A , max_length=A , truncation=A , **A , )
snake_case : int = labels["""input_ids"""]
return model_inputs
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """efficientformer"""
def __init__( self , A = [3, 2, 6, 4] , A = [4_8, 9_6, 2_2_4, 4_4_8] , A = [True, True, True, True] , A = 4_4_8 , A = 3_2 , A = 4 , A = 7 , A = 5 , A = 8 , A = 4 , A = 0.0 , A = 1_6 , A = 3 , A = 3 , A = 3 , A = 2 , A = 1 , A = 0.0 , A = 1 , A = True , A = True , A = 1e-5 , A = "gelu" , A = 0.02 , A = 1e-1_2 , A = 2_2_4 , A = 1e-0_5 , **A , ) -> None:
super().__init__(**A )
snake_case : Optional[Any] = hidden_act
snake_case : List[Any] = hidden_dropout_prob
snake_case : List[Any] = hidden_sizes
snake_case : Dict = num_hidden_layers
snake_case : List[str] = num_attention_heads
snake_case : Any = initializer_range
snake_case : Tuple = layer_norm_eps
snake_case : int = patch_size
snake_case : Tuple = num_channels
snake_case : List[Any] = depths
snake_case : Dict = mlp_expansion_ratio
snake_case : List[Any] = downsamples
snake_case : Any = dim
snake_case : str = key_dim
snake_case : Union[str, Any] = attention_ratio
snake_case : Any = resolution
snake_case : Tuple = pool_size
snake_case : Optional[int] = downsample_patch_size
snake_case : Dict = downsample_stride
snake_case : List[Any] = downsample_pad
snake_case : Union[str, Any] = drop_path_rate
snake_case : int = num_metaad_blocks
snake_case : Optional[Any] = distillation
snake_case : Union[str, Any] = use_layer_scale
snake_case : Optional[int] = layer_scale_init_value
snake_case : List[str] = image_size
snake_case : int = batch_norm_eps
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A = 1_0_1 ) -> str:
snake_case : Any = length
def __len__( self ) -> List[str]:
return self.length
def __getitem__( self , A ) -> int:
return i
class __lowercase :
"""simple docstring"""
def __call__( self , A ) -> Any:
return {"input_ids": torch.tensor(A ), "labels": torch.tensor(A )}
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self ) -> List[str]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
snake_case : Any = nn.Linear(1_2_0 , 8_0 )
def UpperCAmelCase ( self , A , A=None ) -> Dict:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase ( self ) -> Any:
snake_case : Dict = f"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
snake_case : int = self.get_auto_remove_tmp_dir()
snake_case : Dict = f"""--output_dir {output_dir}""".split()
snake_case : Dict = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(A , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[Any] = f"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case : List[str] = f"""--output_dir {output_dir}""".split()
snake_case : Optional[int] = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(A , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCamelCase : Union[str, Any] = HfArgumentParser((TrainingArguments,))
lowerCamelCase : str = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
lowerCamelCase : Dict = DummyDataset(dataset_length)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : Dict = list(range(len(lowercase ) ) )
snake_case : Optional[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
lowerCamelCase : Tuple = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCamelCase : Tuple = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase : Dict = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase : Tuple = 2
lowerCamelCase : List[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase : Optional[int] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase : Tuple = None
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
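
# Illustrative usage (not part of the original file): configs are plain attribute
# containers, so a smaller variant could be built as, e.g.
#   config = GPTNeoXJapaneseConfig(hidden_size=128, num_hidden_layers=4)
#   config.hidden_size  # -> 128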
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ComputeEnvironment.AMAZON_SAGEMAKER
_snake_case = True
_snake_case = """ml.p3.2xlarge"""
_snake_case = """accelerate_sagemaker_execution_role"""
_snake_case = """hf-sm"""
_snake_case = """us-east-1"""
_snake_case = 1
_snake_case = """accelerate-sagemaker-1"""
_snake_case = """1.6"""
_snake_case = """4.4"""
_snake_case = """train.py"""
_snake_case = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
_snake_case = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
# If no defaults are changed, `to_kwargs` returns an empty dict.
snake_case : Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] , A )
assert isinstance(converted_args["""do_train"""] , A )
assert isinstance(converted_args["""epochs"""] , A )
assert isinstance(converted_args["""learning_rate"""] , A )
assert isinstance(converted_args["""max_steps"""] , A )
with pytest.raises(A ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    hex_num = lowercase.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num ,16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
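# Hedged sanity checks with illustrative values (not from the original file):
# int("AC", 16) == 172 == 0b10101100, and the sign is carried through.
assert SCREAMING_SNAKE_CASE__("""AC""" ) == 10101100
assert SCREAMING_SNAKE_CASE__("""-fd""" ) == -11111101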
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( a ,b ) -> str:
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) ,len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) ,b_binary.zfill(max_len ) ) )
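# Illustrative check (values chosen for this sketch): 25 = 0b11001 and
# 9 = 0b1001; zero-filled to a common width, the bitwise AND is "0b01001".
assert SCREAMING_SNAKE_CASE__(25 ,9 ) == "0b01001"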
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
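# Usage sketch in comment form (package-relative imports prevent a standalone
# run here; the checkpoint name is a placeholder):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("some/checkpoint")
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) under the defaults above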
| 684 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __lowercase (UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 1
@register_to_config
def __init__( self , A=2_0_0_0 , A=0.1 , A=2_0 , A=1e-3 ) -> Optional[Any]:
snake_case : List[str] = None
snake_case : List[str] = None
snake_case : int = None
def UpperCAmelCase ( self , A , A = None ) -> Tuple:
snake_case : Dict = torch.linspace(1 , self.config.sampling_eps , A , device=A )
def UpperCAmelCase ( self , A , A , A , A=None ) -> Any:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
snake_case : List[str] = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
snake_case : Tuple = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
snake_case : Any = std.flatten()
while len(std.shape ) < len(score.shape ):
snake_case : Union[str, Any] = std.unsqueeze(-1 )
snake_case : int = -score / std
# compute
snake_case : List[str] = -1.0 / len(self.timesteps )
snake_case : Tuple = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
snake_case : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
snake_case : Optional[Any] = beta_t.unsqueeze(-1 )
snake_case : List[str] = -0.5 * beta_t * x
snake_case : str = torch.sqrt(A )
snake_case : Optional[int] = drift - diffusion**2 * score
snake_case : Dict = x + drift * dt
# add noise
snake_case : int = randn_tensor(x.shape , layout=x.layout , generator=A , device=x.device , dtype=x.dtype )
snake_case : List[Any] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> int:
return self.config.num_train_timesteps
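# Sampling-loop sketch in comment form (this class appears to mirror diffusers'
# ScoreSdeVpScheduler, whose public methods are set_timesteps and step_pred;
# the method names above were placeholder-renamed, so this is an assumption):
#
#   scheduler.set_timesteps(num_inference_steps, device=x.device)
#   for t in scheduler.timesteps:
#       score = model(x, t).sample
#       x, x_mean = scheduler.step_pred(score, t, x)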
| 684 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCamelCase : int = '<<<<<<< This should probably be modified because it mentions: '
lowerCamelCase : Tuple = '=======\n>>>>>>>\n'
lowerCamelCase : Optional[int] = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
lowerCamelCase : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return ConvertCommand(args.tfds_path ,args.datasets_directory )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( A ) -> Tuple:
snake_case : Union[str, Any] = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=A , required=A , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=A , required=A , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=A )
def __init__( self , A , A , *A ) -> List[Any]:
snake_case : List[Any] = get_logger("""datasets-cli/converting""" )
snake_case : Optional[int] = tfds_path
snake_case : Optional[Any] = datasets_directory
def UpperCAmelCase ( self ) -> str:
if os.path.isdir(self._tfds_path ):
snake_case : List[str] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
snake_case : List[Any] = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
snake_case : Optional[int] = os.path.abspath(self._datasets_directory )
self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
snake_case : List[Any] = []
snake_case : Dict = []
snake_case : int = {}
if os.path.isdir(self._tfds_path ):
snake_case : Optional[int] = os.listdir(A )
else:
snake_case : Tuple = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"""Looking at file {f_name}""" )
snake_case : str = os.path.join(A , A )
snake_case : Tuple = os.path.join(A , A )
if not os.path.isfile(A ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(A , encoding="""utf-8""" ) as f:
snake_case : Any = f.readlines()
snake_case : Tuple = []
snake_case : Union[str, Any] = False
snake_case : Optional[int] = False
snake_case : Tuple = []
for line in lines:
snake_case : Tuple = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
snake_case : List[Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
snake_case : List[str] = """"""
continue
elif "from absl import logging" in out_line:
snake_case : Union[str, Any] = """from datasets import logging\n"""
elif "getLogger" in out_line:
snake_case : List[str] = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
snake_case : Dict = True
snake_case : Union[str, Any] = list(filter(lambda A : e in out_line , A ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(A ) + """\n""" )
out_lines.append(A )
out_lines.append(A )
continue
else:
for pattern, replacement in TO_CONVERT:
snake_case : List[Any] = re.sub(A , A , A )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
snake_case : Tuple = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , A )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
snake_case : Optional[Any] = """from . import """ + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
snake_case : int = True
out_lines.append(A )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
snake_case : Any = f_name.replace(""".py""" , """""" )
snake_case : List[str] = os.path.join(A , A )
snake_case : Union[str, Any] = os.path.join(A , A )
os.makedirs(A , exist_ok=A )
self._logger.info(f"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(A )
if needs_manual_update:
with_manual_update.append(A )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.writelines(A )
self._logger.info(f"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
snake_case : List[str] = os.path.basename(A )
snake_case : str = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(A , A )
except KeyError:
self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 684 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
| 684 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_snake_case = Features({"""image""": Image()} )
_snake_case = Features({"""labels""": ClassLabel} )
_snake_case = "image"
_snake_case = "labels"
def UpperCAmelCase ( self , A ) -> Optional[int]:
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , A ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
snake_case : Dict = copy.deepcopy(self )
snake_case : Optional[Any] = self.label_schema.copy()
snake_case : Union[str, Any] = features[self.label_column]
snake_case : List[str] = label_schema
return task_template
@property
def UpperCAmelCase ( self ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
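# Usage sketch in comment form (relative imports again; assuming the alignment
# method above is `align_with_features`, as in the shipping datasets API):
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification().align_with_features(features)
#   task.label_schema["labels"].names  # -> ["cat", "dog"]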
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Union[str, Any] = SMALL_MODEL_IDENTIFIER
snake_case : List[Any] = """pt"""
snake_case : Any = """tf"""
def UpperCAmelCase ( self , A ) -> Any:
snake_case : Union[str, Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(A )
def UpperCAmelCase ( self , A ) -> Any:
snake_case : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=A )
model_tf.save_pretrained(A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = """mock_framework"""
# Framework provided - return whatever the user provides
snake_case : Optional[int] = FeaturesManager.determine_framework(self.test_model , A )
self.assertEqual(A , A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(A )
snake_case : str = FeaturesManager.determine_framework(A , A )
self.assertEqual(A , A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(A )
snake_case : Dict = FeaturesManager.determine_framework(A , A )
self.assertEqual(A , A )
def UpperCAmelCase ( self ) -> Dict:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(A )
snake_case : int = FeaturesManager.determine_framework(A )
self.assertEqual(A , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(A )
snake_case : Optional[Any] = FeaturesManager.determine_framework(A )
self.assertEqual(A , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(A ):
snake_case : Any = FeaturesManager.determine_framework(A )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Union[str, Any] = MagicMock(return_value=A )
with patch("""transformers.onnx.features.is_tf_available""" , A ):
snake_case : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
snake_case : Optional[Any] = MagicMock(return_value=A )
with patch("""transformers.onnx.features.is_torch_available""" , A ):
snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A , self.framework_tf )
# Both in environment -> use PyTorch
snake_case : List[Any] = MagicMock(return_value=A )
snake_case : Any = MagicMock(return_value=A )
with patch("""transformers.onnx.features.is_tf_available""" , A ), patch(
"""transformers.onnx.features.is_torch_available""" , A ):
snake_case : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A , self.framework_pt )
# Both not in environment -> raise error
snake_case : Union[str, Any] = MagicMock(return_value=A )
snake_case : List[Any] = MagicMock(return_value=A )
with patch("""transformers.onnx.features.is_tf_available""" , A ), patch(
"""transformers.onnx.features.is_torch_available""" , A ):
with self.assertRaises(A ):
snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
| 684 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
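# Entry-point sketch in comment form (these classes back the public `pipeline`
# factory; the task names below are the shipping transformers aliases):
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   translator = pipeline("translation_en_to_fr")
#   summarizer("long article text ...", max_length=60)
#   translator("How are you?")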
| 684 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=1024 ) -> Dict:
snake_case , snake_case : Union[str, Any] = [], []
snake_case : List[str] = list(zip(lowercase ,lowercase ) )
snake_case , snake_case : str = sorted_examples[0]
def is_too_big(lowercase ):
return tok(lowercase ,return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
snake_case : str = new_src + """ """ + src
snake_case : Optional[int] = new_tgt + """ """ + tgt
if is_too_big(lowercase ) or is_too_big(lowercase ): # cant fit, finalize example
finished_src.append(lowercase )
finished_tgt.append(lowercase )
snake_case , snake_case : Optional[Any] = src, tgt
else: # can fit, keep adding
snake_case , snake_case : str = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(lowercase )
finished_tgt.append(lowercase )
return finished_src, finished_tgt
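# Self-contained toy of the greedy packing rule above (whitespace tokens stand
# in for the real tokenizer; the helper name `_toy_pack` is invented for this
# sketch and packs single texts rather than src/tgt pairs):
def _toy_pack( texts ,max_tokens ):
    packed ,cur = [] ,""""""
    for text in texts:
        cand = (cur + """ """ + text).strip()
        if len(cand.split() ) > max_tokens and cur:
            packed.append(cur )
            cur = text
        else:
            cur = cand
    if cur:
        packed.append(cur )
    return packed
assert _toy_pack(["""a b""", """c d e""", """f"""] ,4 ) == ["""a b""", """c d e f"""]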
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> int:
snake_case : str = Path(lowercase )
save_path.mkdir(exist_ok=lowercase )
for split in ["train"]:
snake_case , snake_case : Union[str, Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
snake_case : Optional[Any] = [x.rstrip() for x in Path(lowercase ).open().readlines()]
snake_case : Union[str, Any] = [x.rstrip() for x in Path(lowercase ).open().readlines()]
snake_case , snake_case : Dict = pack_examples(lowercase ,lowercase ,lowercase ,lowercase )
print(f"""packed {split} split from {len(lowercase )} examples -> {len(lowercase )}.""" )
Path(save_path / f"""{split}.source""" ).open("""w""" ).write("""\n""".join(lowercase ) )
Path(save_path / f"""{split}.target""" ).open("""w""" ).write("""\n""".join(lowercase ) )
for split in ["val", "test"]:
snake_case , snake_case : List[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(lowercase ,save_path / f"""{split}.source""" )
shutil.copyfile(lowercase ,save_path / f"""{split}.target""" )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" ,type=lowercase ,help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" ,type=lowercase ,default=128 )
parser.add_argument("""--data_dir""" ,type=lowercase )
parser.add_argument("""--save_path""" ,type=lowercase )
snake_case : Union[str, Any] = parser.parse_args()
snake_case : Any = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(lowercase ,Path(args.data_dir ) ,args.max_seq_len ,args.save_path )
if __name__ == "__main__":
packer_cli()
| 684 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """mobilenet_v1"""
def __init__( self , A=3 , A=2_2_4 , A=1.0 , A=8 , A="relu6" , A=True , A=0.9_99 , A=0.02 , A=0.0_01 , **A , ) -> int:
super().__init__(**A )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case : List[str] = num_channels
snake_case : Any = image_size
snake_case : Dict = depth_multiplier
snake_case : Union[str, Any] = min_depth
snake_case : Optional[int] = hidden_act
snake_case : str = tf_padding
snake_case : Optional[Any] = classifier_dropout_prob
snake_case : Dict = initializer_range
snake_case : Union[str, Any] = layer_norm_eps
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = version.parse("""1.11""" )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def UpperCAmelCase ( self ) -> float:
return 1e-4
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : str = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """funnel"""
_snake_case = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self , A=3_0_5_2_2 , A=[4, 4, 4] , A=None , A=2 , A=7_6_8 , A=1_2 , A=6_4 , A=3_0_7_2 , A="gelu_new" , A=0.1 , A=0.1 , A=0.0 , A=0.1 , A=None , A=1e-9 , A="mean" , A="relative_shift" , A=True , A=True , A=True , **A , ) -> str:
snake_case : Dict = vocab_size
snake_case : int = block_sizes
snake_case : Tuple = [1] * len(A ) if block_repeats is None else block_repeats
assert len(A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
snake_case : Dict = num_decoder_layers
snake_case : Optional[Any] = d_model
snake_case : Optional[int] = n_head
snake_case : Dict = d_head
snake_case : List[Any] = d_inner
snake_case : List[str] = hidden_act
snake_case : Union[str, Any] = hidden_dropout
snake_case : Dict = attention_dropout
snake_case : Any = activation_dropout
snake_case : Any = initializer_range
snake_case : int = initializer_std
snake_case : str = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
snake_case : Optional[int] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
snake_case : Union[str, Any] = attention_type
snake_case : Dict = separate_cls
snake_case : Optional[int] = truncate_seq
snake_case : Union[str, Any] = pool_q_only
super().__init__(**A )
@property
def UpperCAmelCase ( self ) -> int:
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCAmelCase ( self , A ) -> List[Any]:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return len(self.block_sizes )
@num_blocks.setter
def UpperCAmelCase ( self , A ) -> Optional[int]:
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 684 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Dict = tau * frequency / samplerate
snake_case : int = sin(lowercase )
snake_case : Optional[Any] = cos(lowercase )
snake_case : List[Any] = _sin / (2 * q_factor)
snake_case : Tuple = (1 - _cos) / 2
snake_case : int = 1 - _cos
snake_case : Union[str, Any] = 1 + alpha
snake_case : List[Any] = -2 * _cos
snake_case : int = 1 - alpha
snake_case : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Tuple = tau * frequency / samplerate
snake_case : Union[str, Any] = sin(lowercase )
snake_case : Tuple = cos(lowercase )
snake_case : List[str] = _sin / (2 * q_factor)
snake_case : List[Any] = (1 + _cos) / 2
snake_case : Any = -1 - _cos
snake_case : Union[str, Any] = 1 + alpha
snake_case : int = -2 * _cos
snake_case : str = 1 - alpha
snake_case : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Union[str, Any] = tau * frequency / samplerate
snake_case : Any = sin(lowercase )
snake_case : str = cos(lowercase )
snake_case : List[Any] = _sin / (2 * q_factor)
snake_case : List[str] = _sin / 2
snake_case : Optional[Any] = 0
snake_case : List[str] = -ba
snake_case : str = 1 + alpha
snake_case : List[Any] = -2 * _cos
snake_case : List[str] = 1 - alpha
snake_case : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Optional[Any] = tau * frequency / samplerate
snake_case : Any = sin(lowercase )
snake_case : Optional[Any] = cos(lowercase )
snake_case : Dict = _sin / (2 * q_factor)
snake_case : Tuple = 1 - alpha
snake_case : List[str] = -2 * _cos
snake_case : List[Any] = 1 + alpha
snake_case : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,) -> IIRFilter:
snake_case : int = tau * frequency / samplerate
snake_case : Optional[Any] = sin(lowercase )
snake_case : str = cos(lowercase )
snake_case : Optional[int] = _sin / (2 * q_factor)
snake_case : Dict = 10 ** (gain_db / 40)
snake_case : str = 1 + alpha * big_a
snake_case : Optional[Any] = -2 * _cos
snake_case : Tuple = 1 - alpha * big_a
snake_case : Dict = 1 + alpha / big_a
snake_case : int = -2 * _cos
snake_case : Any = 1 - alpha / big_a
snake_case : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,) -> IIRFilter:
snake_case : Any = tau * frequency / samplerate
snake_case : Optional[int] = sin(lowercase )
snake_case : Optional[Any] = cos(lowercase )
snake_case : Dict = _sin / (2 * q_factor)
snake_case : List[str] = 10 ** (gain_db / 40)
snake_case : Tuple = (big_a + 1) - (big_a - 1) * _cos
snake_case : Any = (big_a + 1) + (big_a - 1) * _cos
snake_case : List[Any] = (big_a - 1) - (big_a + 1) * _cos
snake_case : List[Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case : int = 2 * sqrt(lowercase ) * alpha
snake_case : List[Any] = big_a * (pmc + aaa)
snake_case : Dict = 2 * big_a * mpc
snake_case : List[Any] = big_a * (pmc - aaa)
snake_case : int = ppmc + aaa
snake_case : Dict = -2 * pmpc
snake_case : str = ppmc - aaa
snake_case : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,) -> IIRFilter:
snake_case : Union[str, Any] = tau * frequency / samplerate
snake_case : Tuple = sin(lowercase )
snake_case : Any = cos(lowercase )
snake_case : Tuple = _sin / (2 * q_factor)
snake_case : int = 10 ** (gain_db / 40)
snake_case : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
snake_case : Dict = (big_a + 1) + (big_a - 1) * _cos
snake_case : Union[str, Any] = (big_a - 1) - (big_a + 1) * _cos
snake_case : List[Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case : Dict = 2 * sqrt(lowercase ) * alpha
snake_case : List[Any] = big_a * (ppmc + aaa)
snake_case : List[Any] = -2 * big_a * pmpc
snake_case : str = big_a * (ppmc - aaa)
snake_case : List[str] = pmc + aaa
snake_case : List[str] = 2 * mpc
snake_case : Union[str, Any] = pmc - aaa
snake_case : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
| 684 |
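# A short usage sketch for the biquad builders above, using the conventional
# names make_lowpass / make_highshelf for the anonymized functions (an
# assumption, matching TheAlgorithms' audio_filters package, whose
# IIRFilter.process filters one sample at a time).
samplerate = 48_000
lowpass = make_lowpass(1_000, samplerate)           # corner around 1 kHz
highshelf = make_highshelf(8_000, samplerate, 6.0)  # +6 dB shelf above ~8 kHz

impulse = [1.0] + [0.0] * 15
# feeding an impulse through the cascade yields its combined impulse response
response = [highshelf.process(lowpass.process(x)) for x in impulse]
print(response)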
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
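# Sanity check for the modular-exponentiation helper above, assuming the two
# anonymized definitions are restored to their original names _modexpt and
# solution: the recursion must agree with Python's built-in three-argument pow.
# Note: the exponent == 1 base case returns `base` unreduced, so keep
# base < modulus, as the Project Euler inputs do.
for base, exponent, modulus in [(2, 10, 1_000), (1_777, 1_855, 10**8), (7, 1, 13)]:
    assert _modexpt(base, exponent, modulus) == pow(base, exponent, modulus)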
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]: # noqa: E741
snake_case : str = len(lowercase )
snake_case : Optional[Any] = 0
snake_case : Union[str, Any] = [0] * n
snake_case : List[Any] = [False] * n
snake_case : Optional[Any] = [False] * n
def dfs(lowercase ,lowercase ,lowercase ,lowercase ):
if parent == root:
out_edge_count += 1
snake_case : Tuple = True
snake_case : Any = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
snake_case : Any = dfs(lowercase ,lowercase ,lowercase ,lowercase )
snake_case : Dict = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
snake_case : List[str] = True
# AP found via cycle
if at == low[to]:
snake_case : Dict = True
else:
snake_case : str = min(low[at] ,lowercase )
return out_edge_count
for i in range(lowercase ):
if not visited[i]:
snake_case : Tuple = 0
snake_case : Any = dfs(lowercase ,lowercase ,-1 ,lowercase )
snake_case : str = out_edge_count > 1
for x in range(len(lowercase ) ):
if is_art[x] is True:
print(lowercase )
# Adjacency list of graph
lowerCamelCase : int = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 684 |
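# For the adjacency list above the articulation points are 2, 3 and 5:
# removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5 detaches
# {6, 7, 8}. A cross-check with networkx (an extra dependency, not used by the
# snippet itself):
import networkx as nx

edges = [(0, 1), (0, 2), (1, 2), (2, 3), (3, 4), (2, 5), (5, 6), (6, 7), (7, 8), (5, 8)]
print(sorted(nx.articulation_points(nx.Graph(edges))))  # [2, 3, 5]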
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
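# The same Project Euler 205 probability via an equivalent sketch: tabulate
# each player's total frequencies separately (4**9 + 6**6 enumerations rather
# than their product) and count the winning pairs.
from collections import Counter
from itertools import product

def totals(sides: int, dice: int) -> Counter:
    return Counter(sum(rolls) for rolls in product(range(1, sides + 1), repeat=dice))

peter, colin = totals(4, 9), totals(6, 6)
wins = sum(
    p_freq * c_freq
    for p_total, p_freq in peter.items()
    for c_total, c_freq in colin.items()
    if p_total > c_total
)
print(round(wins / (4**9 * 6**6), 7))  # 0.5731441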
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
snake_case : Union[str, Any] = len(lowercase )
for i in range(1 ,lowercase ):
snake_case : Union[str, Any] = collection[i]
snake_case : List[Any] = 0
snake_case : Optional[int] = i - 1
while low <= high:
snake_case : Dict = (low + high) // 2
if val < collection[mid]:
snake_case : str = mid - 1
else:
snake_case : Any = mid + 1
for j in range(lowercase ,lowercase ,-1 ):
snake_case : Optional[int] = collection[j - 1]
snake_case : Any = val
return collection
if __name__ == "__main__":
lowerCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Dict = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 684 |
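# An idiomatic sketch of the same binary insertion sort using the standard
# library's bisect module in place of the hand-rolled search above (insort
# uses bisect_right, matching the `else: low = mid + 1` branch).
from bisect import insort

def binary_insertion_sort_sketch(items: list) -> list:
    result: list = []
    for item in items:
        insort(result, item)  # binary search + shift-and-insert in one call
    return result

assert binary_insertion_sort_sketch([5, 2, 4, 1]) == [1, 2, 4, 5]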
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self , A ) -> float:
return 0.0
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> tuple[int | float, int | float]:
snake_case : Any = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
snake_case : Optional[int] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
snake_case : Union[str, Any] = 512
snake_case : str = [1] + [0] * (size - 1)
snake_case : Optional[Any] = [filter_type.process(lowercase ) for item in inputs]
snake_case : List[str] = [0] * (samplerate - size) # zero-padding
outputs += filler
snake_case : Union[str, Any] = np.abs(np.fft.fft(lowercase ) )
snake_case : List[str] = 20 * np.logaa(lowercase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 ,samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
snake_case : Optional[int] = get_bounds(lowercase ,lowercase )
plt.ylim(max([-80, bounds[0]] ) ,min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(lowercase )
plt.show()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
snake_case : Optional[int] = 512
snake_case : Any = [1] + [0] * (size - 1)
snake_case : Optional[Any] = [filter_type.process(lowercase ) for item in inputs]
snake_case : str = [0] * (samplerate - size) # zero-padding
outputs += filler
snake_case : Any = np.angle(np.fft.fft(lowercase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 ,samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi ,2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(lowercase ,-2 * pi ) )
plt.show()
| 684 |
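# Usage sketch tying this plotting module to the biquad builders earlier in
# the file set, using the original names show_frequency_response /
# show_phase_response for the anonymized helpers and TheAlgorithms' package
# layout (both assumptions).
from audio_filters.butterworth_filter import make_lowpass

samplerate = 48_000
show_frequency_response(make_lowpass(1_000, samplerate), samplerate)
show_phase_response(make_lowpass(1_000, samplerate), samplerate)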
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
with open(os.path.dirname(lowercase ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 1 |
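# The four scans above can be collapsed into one loop over direction vectors;
# a minimal sketch of that refactoring (grid loading unchanged):
def largest_product(grid: list[list[int]], run: int = 4) -> int:
    rows, cols = len(grid), len(grid[0])
    directions = [(0, 1), (1, 0), (1, 1), (1, -1)]  # right, down, both diagonals
    best = 0
    for r in range(rows):
        for c in range(cols):
            for dr, dc in directions:
                if 0 <= r + (run - 1) * dr < rows and 0 <= c + (run - 1) * dc < cols:
                    product = 1
                    for k in range(run):
                        product *= grid[r + k * dr][c + k * dc]
                    best = max(best, product)
    return best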
import datasets
from .evaluate import evaluate
lowerCamelCase : Union[str, Any] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
lowerCamelCase : Optional[int] = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
lowerCamelCase : Dict = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def UpperCAmelCase ( self , A , A ) -> Any:
snake_case : Union[str, Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
snake_case : int = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
snake_case : int = evaluate(dataset=A , predictions=A )
return score
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = 1_0
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[Any] = [1, 2, 3, 4]
snake_case : str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
snake_case : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
snake_case : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Tuple = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
snake_case , snake_case : Optional[Any] = process_story(A )
self.assertEqual(A , [] )
def UpperCAmelCase ( self ) -> str:
snake_case : Dict = """"""
snake_case , snake_case : Tuple = process_story(A )
self.assertEqual(A , [] )
self.assertEqual(A , [] )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
snake_case , snake_case : List[Any] = process_story(A )
snake_case : str = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(A , A )
snake_case : List[str] = ["""It was the best of times."""]
self.assertEqual(A , A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Dict = torch.tensor([1, 2, 3, 4] )
snake_case : Tuple = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(A , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
snake_case : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 2_3 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self ) -> Any:
snake_case : List[str] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
snake_case : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = 1_0_1
snake_case : int = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
snake_case : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
snake_case : List[Any] = compute_token_type_ids(A , A )
np.testing.assert_array_equal(A , A )
| 684 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2 the vocabulary had n_vocab=80 characters; v3 dropped "+", leaving n_vocab=79.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 684 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
# A local function to see if a dot lands in the circle.
def is_in_circle(lowercase ,lowercase ) -> bool:
snake_case : Tuple = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0 ,1.0 ) ,uniform(-1.0 ,1.0 ) ) )
for _ in range(lowercase ) )
# The ratio of the area for circle to square is pi/4.
snake_case : Dict = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 0.0 ,lowercase = 1.0 ,) -> float:
return mean(
function_to_integrate(uniform(lowercase ,lowercase ) ) for _ in range(lowercase ) ) * (max_value - min_value)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = 0.0 ,lowercase = 1.0 ) -> None:
def identity_function(lowercase ) -> float:
return x
snake_case : Union[str, Any] = area_under_curve_estimator(
lowercase ,lowercase ,lowercase ,lowercase )
snake_case : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print("""******************""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> None:
def function_to_integrate(lowercase ) -> float:
return sqrt(4.0 - x * x )
snake_case : Optional[Any] = area_under_curve_estimator(
lowercase ,lowercase ,0.0 ,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
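# A vectorized variant of the dart-throwing pi estimator above (numpy
# assumed available):
import numpy as np

def pi_estimator_np(iterations: int) -> float:
    xy = np.random.uniform(-1.0, 1.0, size=(iterations, 2))
    inside = (xy**2).sum(axis=1) <= 1.0  # comparing squared distance avoids the sqrt
    return 4.0 * inside.mean()

print(pi_estimator_np(1_000_000))  # ~3.1416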
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 1 |
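# The demo above prints [4, 10, 18] -- every index where 'ABC' begins. The
# scan is O(len(s) * len(pattern)) in the worst case; for a single match the
# built-in str.find does the same job:
assert "ABAAABCDBBABCDDEBCABC".find("ABC") == 4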
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
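# The expression above is an algebraic rewrite of the hyperbolic tangent,
# tanh(x) = 2 / (1 + exp(-2x)) - 1; a quick check against numpy's built-in:
import numpy as np

v = np.array([-2.0, 0.0, 2.0])
assert np.allclose((2 / (1 + np.exp(-2 * v))) - 1, np.tanh(v))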
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A , A , A , ) -> Any:
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=A , speech_processor=A , vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , )
def UpperCAmelCase ( self , A = "auto" ) -> Dict:
if slice_size == "auto":
snake_case : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase ( self ) -> int:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A=1_6_0_0_0 , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Tuple:
snake_case : List[Any] = self.speech_processor.feature_extractor(
A , return_tensors="""pt""" , sampling_rate=A ).input_features.to(self.device )
snake_case : str = self.speech_model.generate(A , max_length=4_8_0_0_0_0 )
snake_case : int = self.speech_processor.tokenizer.batch_decode(A , skip_special_tokens=A , normalize=A )[
0
]
if isinstance(A , A ):
snake_case : Tuple = 1
elif isinstance(A , A ):
snake_case : Union[str, Any] = len(A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(A )}.""" )
# get prompt text embeddings
snake_case : Dict = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
snake_case : int = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case , snake_case , snake_case : Dict = text_embeddings.shape
snake_case : List[str] = text_embeddings.repeat(1 , A , 1 )
snake_case : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case : List[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case : List[str]
if negative_prompt is None:
snake_case : Dict = [""""""] * batch_size
elif type(A ) is not type(A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(A )} !="""
f""" {type(A )}.""" )
elif isinstance(A , A ):
snake_case : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
snake_case : Dict = negative_prompt
snake_case : Union[str, Any] = text_input_ids.shape[-1]
snake_case : Any = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
snake_case : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case : int = uncond_embeddings.shape[1]
snake_case : Tuple = uncond_embeddings.repeat(1 , A , 1 )
snake_case : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case : str = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
snake_case : Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case : Tuple = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : int = {}
if accepts_eta:
snake_case : Any = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : List[Any] = self.scheduler.scale_model_input(A , A )
# predict the noise residual
snake_case : List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case , snake_case : str = noise_pred.chunk(2 )
snake_case : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case : Optional[Any] = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
snake_case : List[str] = 1 / 0.1_82_15 * latents
snake_case : int = self.vae.decode(A ).sample
snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : Union[str, Any] = self.numpy_to_pil(A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 684 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 1 |
from __future__ import annotations
from collections import namedtuple
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> tuple:
snake_case : Tuple = namedtuple("""result""" ,"""name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" ,power / current )
elif current == 0:
return result("""current""" ,power / voltage )
elif power == 0:
return result("""power""" ,float(round(abs(voltage * current ) ,2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
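# Expected behaviour of the solver above (original name electric_power
# assumed): exactly one argument is 0 and gets solved for.
#   electric_power(voltage=0, current=2, power=5)   -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)   -> result(name='power', value=4.0)
#   electric_power(voltage=-2, current=3, power=0)  -> result(name='power', value=6.0)  # |V*I|, rounded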
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase : Optional[Any] = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 684 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCamelCase : List[str] = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
snake_case : List[Any] = list(s_dict.keys() )
for key in keys:
snake_case : int = R""".*/layers_(\d+)"""
snake_case : Optional[int] = key
if re.match(lowercase ,lowercase ):
snake_case : Optional[Any] = re.sub(R"""layers_(\d+)""" ,R"""block/\1/layer""" ,lowercase )
snake_case : int = R"""(encoder|decoder)\/"""
if re.match(lowercase ,lowercase ):
snake_case : Tuple = re.match(lowercase ,lowercase ).groups()
if groups[0] == "encoder":
snake_case : List[str] = re.sub(R"""/mlp/""" ,R"""/1/mlp/""" ,lowercase )
snake_case : Optional[int] = re.sub(R"""/pre_mlp_layer_norm/""" ,R"""/1/layer_norm/""" ,lowercase )
elif groups[0] == "decoder":
snake_case : List[str] = re.sub(R"""/mlp/""" ,R"""/2/mlp/""" ,lowercase )
snake_case : List[Any] = re.sub(R"""/pre_mlp_layer_norm/""" ,R"""/2/layer_norm/""" ,lowercase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
snake_case : Optional[int] = new_key.replace(lowercase ,lowercase )
print(f"""{key} -> {new_key}""" )
snake_case : str = s_dict.pop(lowercase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : List[str] = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : Any = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
snake_case : Tuple = s_dict[key].shape[0]
snake_case : str = s_dict[key]
for idx in range(lowercase ):
snake_case : List[str] = expert_weihts[idx]
print(f"""{key} -> {key.replace("expert/" ,"nested fstring" )}""" )
s_dict.pop(lowercase )
return s_dict
lowerCamelCase : str = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Optional[int]:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(lowercase ,"""r""" ) as f:
snake_case : List[str] = f.read()
snake_case : List[Any] = re.findall(R"""(.*) = ([0-9.]*)""" ,lowercase )
snake_case : Optional[int] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
snake_case : int = float(lowercase ) if """.""" in value else int(lowercase )
snake_case : Tuple = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" ,lowercase )[0]
snake_case : int = str(activation[1] )
snake_case : int = num_experts
snake_case : Union[str, Any] = SwitchTransformersConfig(**lowercase )
return config
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=None ,lowercase="./" ,lowercase=8 ) -> List[str]:
# Initialise PyTorch model
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
snake_case : Tuple = checkpoints.load_tax_checkpoint(lowercase )
if gin_file is not None:
snake_case : Union[str, Any] = convert_gin_to_config(lowercase ,lowercase )
else:
snake_case : Dict = SwitchTransformersConfig.from_pretrained(lowercase )
snake_case : Tuple = SwitchTransformersForConditionalGeneration(lowercase )
snake_case : Optional[int] = flax_params["""target"""]
snake_case : Dict = flatten_dict(lowercase ,sep="""/""" )
snake_case : int = rename_keys(lowercase )
snake_case : Dict = unflatten_dict(lowercase ,sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase ,lowercase )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
lowerCamelCase : str = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
| 684 | 1 |
import requests
lowerCamelCase : int = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> None:
# fetching a list of articles in json format
snake_case : Optional[int] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] ,1 ):
print(f"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
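# Expected behaviour of the converter above (original name hex_to_bin
# assumed):
#   hex_to_bin("AC")  -> 10101100
#   hex_to_bin("-fe") -> -11111110
#   hex_to_bin("0")   -> 0 (relies on the zero guard added above)
# Cross-check of the first case against the standard library:
assert format(int("AC", 16), "b") == "10101100"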
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
snake_case : Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
snake_case : Optional[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : str = AutoConfig.from_pretrained("""gpt2""" )
snake_case : Optional[int] = GenerationConfig.from_model_config(A )
snake_case : Dict = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase ( self ) -> Dict:
snake_case : List[Any] = GenerationConfig()
snake_case : Union[str, Any] = {
"""max_new_tokens""": 1_0_2_4,
"""foo""": """bar""",
}
snake_case : int = copy.deepcopy(A )
snake_case : Optional[Any] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
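    # The pattern under test, as a standalone sketch (GenerationConfig is the
    # real transformers class; the values here are arbitrary):
    #   config = GenerationConfig()
    #   unused = config.update(max_new_tokens=256, foo="bar")  # -> {"foo": "bar"}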
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Optional[Any] = GenerationConfig()
snake_case : Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
snake_case : str = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
snake_case : int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Tuple = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
snake_case : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
snake_case : Any = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> str:
snake_case : int = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase ( cls ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Dict:
snake_case : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
snake_case : str = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
snake_case : Optional[int] = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
snake_case : Union[str, Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
snake_case : Union[str, Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 684 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
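# Minimal usage sketch (shown as comments because the method names above are
# obfuscated; it assumes the upstream transformers image-processor API that
# this class mirrors, and `ImageProcessor` / `pil_image` are illustrative names):
#   processor = ImageProcessor(size={"height": 256, "width": 256})
#   batch = processor(images=[pil_image], return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) after resize + center crop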
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
snake_case : Optional[Any] = []
snake_case : Optional[int] = 1
while len(lowercase ) < 1E6:
constant.append(str(lowercase ) )
i += 1
snake_case : Optional[Any] = """""".join(lowercase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
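# The loop above builds the Champernowne digit string "123456789101112..." and
# the product multiplies its 1st, 10th, 100th, ..., 1_000_000th digits
# (Project Euler problem 40).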
if __name__ == "__main__":
print(solution())
| 684 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
_snake_case = """CIDAS/clipseg-rd64-refined"""
_snake_case = """image_segmenter"""
_snake_case = CLIPSegForImageSegmentation
_snake_case = ["""image""", """text"""]
_snake_case = ["""image"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""vision"""] )
super().__init__(*A , **A )
def UpperCAmelCase ( self , A , A ) -> List[str]:
return self.pre_processor(text=[label] , images=[image] , padding=A , return_tensors="""pt""" )
def UpperCAmelCase ( self , A ) -> List[str]:
with torch.no_grad():
snake_case : Tuple = self.model(**A ).logits
return logits
def UpperCAmelCase ( self , A ) -> Optional[int]:
snake_case : str = outputs.cpu().detach().numpy()
snake_case : str = 0
snake_case : Optional[int] = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
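# Tool flow (the method names above are obfuscated; upstream, PipelineTool wires
# them together): the encode step builds processor inputs from (image, label),
# the forward step runs CLIPSeg to get logits, and the decode step thresholds
# the logits to a binary mask and returns it as a PIL image.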
| 684 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCamelCase : Optional[int] = TypeVar('T')
class __lowercase (Generic[T] ):
"""simple docstring"""
_snake_case = 42 # Cache store of keys
_snake_case = 42 # References of the keys in cache
_snake_case = 10 # Maximum capacity of cache
def __init__( self , A ) -> None:
snake_case : str = deque()
snake_case : Tuple = set()
if not n:
snake_case : str = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
snake_case : str = n
def UpperCAmelCase ( self , A ) -> None:
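        # refer(x): on a cache miss, evict the least-recently-used key (the
        # right end of the deque) when at capacity; on a hit, move x back to
        # the front (most recently used).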
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
snake_case : str = self.dq_store.pop()
self.key_reference.remove(A )
else:
self.dq_store.remove(A )
self.dq_store.appendleft(A )
self.key_reference.add(A )
def UpperCAmelCase ( self ) -> None:
for k in self.dq_store:
print(A )
def __repr__( self ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 684 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
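# Minimal usage sketch (assumes the upstream `transformers.pipeline` factory;
# the classes above mirror Text2TextGenerationPipeline and its summarization /
# translation subclasses):
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("Long article text ...", max_length=60, min_length=10)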
| 684 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = (UniPCMultistepScheduler,)
_snake_case = (("""num_inference_steps""", 25),)
def UpperCAmelCase ( self , **A ) -> Optional[Any]:
snake_case : Union[str, Any] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""solver_type""": """bh2""",
}
config.update(**A )
return config
def UpperCAmelCase ( self , A=0 , **A ) -> int:
snake_case : Dict = dict(self.forward_default_kwargs )
snake_case : Tuple = kwargs.pop("""num_inference_steps""" , A )
snake_case : List[Any] = self.dummy_sample
snake_case : List[Any] = 0.1 * sample
snake_case : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Union[str, Any] = self.get_scheduler_config(**A )
snake_case : Union[str, Any] = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals
snake_case : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
snake_case : Optional[Any] = scheduler_class.from_pretrained(A )
new_scheduler.set_timesteps(A )
# copy over dummy past residuals
snake_case : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case , snake_case : Union[str, Any] = sample, sample
for t in range(A , time_step + scheduler.config.solver_order + 1 ):
snake_case : Tuple = scheduler.step(A , A , A , **A ).prev_sample
snake_case : Optional[int] = new_scheduler.step(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self , A=0 , **A ) -> Tuple:
snake_case : Union[str, Any] = dict(self.forward_default_kwargs )
snake_case : List[Any] = kwargs.pop("""num_inference_steps""" , A )
snake_case : Optional[int] = self.dummy_sample
snake_case : List[Any] = 0.1 * sample
snake_case : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Any = self.get_scheduler_config()
snake_case : str = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals (must be after setting timesteps)
snake_case : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
snake_case : List[Any] = scheduler_class.from_pretrained(A )
# copy over dummy past residuals
new_scheduler.set_timesteps(A )
# copy over dummy past residual (must be after setting timesteps)
snake_case : str = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case : str = scheduler.step(A , A , A , **A ).prev_sample
snake_case : Tuple = new_scheduler.step(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self , A=None , **A ) -> Optional[int]:
if scheduler is None:
snake_case : Tuple = self.scheduler_classes[0]
snake_case : Dict = self.get_scheduler_config(**A )
snake_case : List[str] = scheduler_class(**A )
snake_case : str = self.scheduler_classes[0]
snake_case : Optional[Any] = self.get_scheduler_config(**A )
snake_case : Any = scheduler_class(**A )
snake_case : Tuple = 1_0
snake_case : Tuple = self.dummy_model()
snake_case : Any = self.dummy_sample_deter
scheduler.set_timesteps(A )
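        # standard diffusers sampling loop: predict the noise residual with the
        # model at each timestep, then let the scheduler take one reverse step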
for i, t in enumerate(scheduler.timesteps ):
snake_case : Tuple = model(A , A )
snake_case : Tuple = scheduler.step(A , A , A ).prev_sample
return sample
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : List[Any] = dict(self.forward_default_kwargs )
snake_case : Optional[int] = kwargs.pop("""num_inference_steps""" , A )
for scheduler_class in self.scheduler_classes:
snake_case : Optional[int] = self.get_scheduler_config()
snake_case : Optional[Any] = scheduler_class(**A )
snake_case : str = self.dummy_sample
snake_case : int = 0.1 * sample
if num_inference_steps is not None and hasattr(A , """set_timesteps""" ):
scheduler.set_timesteps(A )
elif num_inference_steps is not None and not hasattr(A , """set_timesteps""" ):
snake_case : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
snake_case : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
snake_case : int = scheduler.timesteps[5]
snake_case : Any = scheduler.timesteps[6]
snake_case : Optional[Any] = scheduler.step(A , A , A , **A ).prev_sample
snake_case : List[Any] = scheduler.step(A , A , A , **A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ) -> Dict:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case : Tuple = UniPCMultistepScheduler(**self.get_scheduler_config() )
snake_case : Union[str, Any] = self.full_loop(scheduler=A )
snake_case : Any = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
snake_case : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
snake_case : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case : List[str] = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case : Optional[Any] = self.full_loop(scheduler=A )
snake_case : int = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCAmelCase ( self ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A )
def UpperCAmelCase ( self ) -> Any:
self.check_over_configs(thresholding=A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A , prediction_type=A , sample_max_value=A , solver_order=A , solver_type=A , )
def UpperCAmelCase ( self ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def UpperCAmelCase ( self ) -> List[Any]:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A , solver_type=A , prediction_type=A , )
snake_case : Dict = self.full_loop(
solver_order=A , solver_type=A , prediction_type=A , )
assert not torch.isnan(A ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self ) -> List[str]:
self.check_over_configs(lower_order_final=A )
self.check_over_configs(lower_order_final=A )
def UpperCAmelCase ( self ) -> int:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A , time_step=0 )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : int = self.full_loop()
snake_case : Dict = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Any = self.full_loop(prediction_type="""v_prediction""" )
snake_case : Tuple = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.10_14 ) < 1e-3
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = self.scheduler_classes[0]
snake_case : List[str] = self.get_scheduler_config(thresholding=A , dynamic_thresholding_ratio=0 )
snake_case : List[Any] = scheduler_class(**A )
snake_case : int = 1_0
snake_case : Tuple = self.dummy_model()
snake_case : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(A )
for i, t in enumerate(scheduler.timesteps ):
snake_case : Union[str, Any] = model(A , A )
snake_case : int = scheduler.step(A , A , A ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase ( self , **A ) -> Optional[int]:
for scheduler_class in self.scheduler_classes:
snake_case : Optional[int] = self.get_scheduler_config(**A )
snake_case : Dict = scheduler_class(**A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
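# Worked example: hashing ["x = 1  # set x", "", "print(x)"] strips the comment
# and drops the blank line, then hashes "x = 1  \nprint(x)" (utf-8) with the
# imported hash (sha-256 upstream) and returns the hex digest, so a builder
# module caches to the same key until its source code actually changes.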
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 1 |
import math
class __lowercase :
"""simple docstring"""
def __init__( self , A=0 ) -> Tuple: # a graph with Node 0,1,...,N-1
snake_case : int = n
snake_case : Optional[int] = [
[math.inf for j in range(0 , A )] for i in range(0 , A )
] # adjacency matrix for weight
snake_case : int = [
[math.inf for j in range(0 , A )] for i in range(0 , A )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : str = w
def UpperCAmelCase ( self ) -> Optional[int]:
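        # Floyd-Warshall: allow each vertex k in turn as an intermediate hop
        # and relax every pair (i, j), giving all-pairs shortest paths in O(n^3)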
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
snake_case : Tuple = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase ( self , A , A ) -> Any:
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCamelCase : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> int:
for attribute in key.split(""".""" ):
snake_case : Tuple = getattr(lowercase ,lowercase )
if weight_type is not None:
snake_case : Tuple = getattr(lowercase ,lowercase ).shape
else:
snake_case : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case : int = value
elif weight_type == "weight_g":
snake_case : str = value
elif weight_type == "weight_v":
snake_case : Any = value
elif weight_type == "bias":
snake_case : List[str] = value
else:
snake_case : Optional[Any] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Dict:
snake_case : List[Any] = []
snake_case : Dict = fairseq_model.state_dict()
snake_case : Any = hf_model.feature_extractor
snake_case : Dict = hf_model.adapter
for name, value in fairseq_dict.items():
snake_case : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase ,lowercase ,lowercase ,lowercase ,hf_model.config.feat_extract_norm == """group""" ,)
snake_case : Optional[int] = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(lowercase ,lowercase ,lowercase ,lowercase )
snake_case : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case : Optional[Any] = True
if "*" in mapped_key:
snake_case : List[Any] = name.split(lowercase )[0].split(""".""" )[-2]
snake_case : List[str] = mapped_key.replace("""*""" ,lowercase )
if "weight_g" in name:
snake_case : str = """weight_g"""
elif "weight_v" in name:
snake_case : str = """weight_v"""
elif "bias" in name:
snake_case : str = """bias"""
elif "weight" in name:
snake_case : List[str] = """weight"""
else:
snake_case : Optional[int] = None
set_recursively(lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> Optional[Any]:
snake_case : List[str] = full_name.split("""conv_layers.""" )[-1]
snake_case : Any = name.split(""".""" )
snake_case : Any = int(items[0] )
snake_case : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Any:
snake_case : Union[str, Any] = full_name.split("""adaptor.""" )[-1]
snake_case : Dict = name.split(""".""" )
if items[1].isdigit():
snake_case : int = int(items[1] )
else:
snake_case : Union[str, Any] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
snake_case : Optional[int] = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
snake_case : Any = value
logger.info(f"""Adapter proj layer norm weight was initialized from {full_name}.""" )
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
snake_case : int = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
snake_case : List[str] = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(lowercase ,lowercase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
snake_case : Optional[int] = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
snake_case : Optional[int] = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case , snake_case : Any = emb.weight.shape
snake_case : Union[str, Any] = nn.Linear(lowercase ,lowercase ,bias=lowercase )
snake_case : Optional[Any] = emb.weight.data
return lin_layer
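# Note (added for illustration): this is the standard weight-tying helper. The
# embedding weight has shape (vocab_size, emb_size); overwriting the Linear's
# `.data` with it makes the layer project hidden states of size emb_size to
# vocab_size logits, sharing parameters with the embedding and using no bias.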
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,) -> str:
snake_case : List[str] = WavaVecaConfig.from_pretrained(
lowercase ,add_adapter=lowercase ,adapter_stride=lowercase ,adapter_kernel_size=lowercase ,use_auth_token=lowercase ,output_hidden_size=lowercase ,)
snake_case : int = MBartConfig.from_pretrained(lowercase )
# load model
snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} ,)
snake_case : Tuple = model[0].eval()
# load feature extractor
snake_case : List[str] = WavaVecaFeatureExtractor.from_pretrained(lowercase ,use_auth_token=lowercase )
# set weights for wav2vec2 encoder
snake_case : Dict = WavaVecaModel(lowercase )
recursively_load_weights_wavaveca(model.encoder ,lowercase )
# load decoder weights
snake_case : int = MBartForCausalLM(lowercase )
snake_case , snake_case : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=lowercase )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
snake_case : List[str] = SpeechEncoderDecoderModel(encoder=lowercase ,decoder=lowercase )
snake_case : Union[str, Any] = False
snake_case : Any = MBartaaTokenizer(lowercase )
tokenizer.save_pretrained(lowercase )
snake_case : List[str] = hf_wavavec.config.to_dict()
snake_case : Union[str, Any] = tokenizer.pad_token_id
snake_case : str = tokenizer.bos_token_id
snake_case : Optional[Any] = tokenizer.eos_token_id
snake_case : str = """mbart50"""
snake_case : Dict = """wav2vec2"""
snake_case : Union[str, Any] = tokenizer.eos_token_id
snake_case : Tuple = 250004
snake_case : Union[str, Any] = tokenizer.eos_token_id
snake_case : str = SpeechEncoderDecoderConfig.from_dict(lowercase )
hf_wavavec.save_pretrained(lowercase )
feature_extractor.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_0_2_4, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=2_5_0_0_0_4, type=int, help='`decoder_start_token_id` of model config')
lowerCamelCase : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 684 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
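# Illustration (added; small numbers for clarity): for p = 11 the first check
# rejects g = 10, since pow(10, 2, 11) == 1. By Fermat's little theorem
# pow(g, p, p) == g % p for prime p, so the second check only ever rejects g == 1.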
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private key d -> has to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
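# Key relation (added for illustration; local names are mangled, upstream calls
# them e_1, d and e_2): e_2 is the inverse of pow(e_1, d, p) modulo p, so
# (pow(e_1, d, p) * e_2) % p == 1. The public key is (key_size, e_1, e_2, p)
# and the private key keeps only the exponent d.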
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 1 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax""", """transformers"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax""", """transformers"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax""", """transformers"""] )
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
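# Worked example (added for illustration): _modexpt(2, 10, 1000) squares
# _modexpt(2, 5, 1000) = 2 * _modexpt(2, 4, 1000) = 2 * 16 = 32 and returns
# (32 * 32) % 1000 = 24 -- the last three digits of 2**10 = 1024, as the
# builtin confirms:
assert pow(2, 10, 1000) == 24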
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
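# Note (added for illustration): the loop computes the hyperexponentiation
# (tetration) base^^height while keeping only the last `digits` digits of the
# running tower, mirroring Project Euler 188 (the last 8 digits of 1777^^1855).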
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
with open(os.path.dirname(lowercase ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
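# Note (added for illustration): the four loops above scan every horizontal,
# vertical and diagonal run of four adjacent numbers in the 20x20 grid and keep
# the greatest product (Project Euler 11).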
if __name__ == "__main__":
print(solution())
| 684 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
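# Example (added for illustration): with two 4-sided dice exactly four ordered
# rolls sum to 5 -- (1, 4), (2, 3), (3, 2) and (4, 1) -- so the entry at index 5
# of the returned list would be 4. A direct count confirms it:
assert sum(1 for a in range(1, 5) for b in range(1, 5) if a + b == 5) == 4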
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Optional[int] = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
with open(os.path.dirname(lowercase ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowercase (unittest.TestCase , UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : str = load_tool("""text-classification""" )
self.tool.setup()
snake_case : Any = load_tool("""text-classification""" , remote=A )
def UpperCAmelCase ( self ) -> int:
snake_case : str = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(A , """positive""" )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Any = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(A , """positive""" )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[Any] = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(A , """positive""" )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : int = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(A , """positive""" )
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
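# Worked example (added for illustration): on [4, 5, 2, 1, 2] the first backward
# sweep bubbles 1 to the front, giving [1, 4, 5, 2, 2]; the forward sweep then
# pushes 5 to the back, giving [1, 4, 2, 2, 5]. Repeating until no swap occurs
# yields [1, 2, 2, 4, 5].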
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> list[int]:
snake_case : str = [0] * no_of_processes
snake_case : Optional[Any] = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowercase ):
snake_case : List[str] = burst_time[i]
snake_case : list[int] = []
snake_case : Optional[int] = 0
snake_case : Dict = 0
# While processes remain uncompleted:
# a process whose arrival time has passed and which still has remaining
# execution time is put into ready_process, and the shortest process in
# ready_process (target_process) is executed.
while completed != no_of_processes:
snake_case : List[str] = []
snake_case : str = -1
for i in range(lowercase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowercase )
if len(lowercase ) > 0:
snake_case : Tuple = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case : Union[str, Any] = i
total_time += burst_time[target_process]
completed += 1
snake_case : Any = 0
snake_case : Optional[Any] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> list[int]:
snake_case : Dict = [0] * no_of_processes
for i in range(lowercase ):
snake_case : Optional[int] = burst_time[i] + waiting_time[i]
return turn_around_time
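# Worked example (added for illustration, matching TEST CASE 01 below): with all
# arrivals at 0 and bursts [2, 5, 3, 7], SRTF runs P1, P3, P2, P4, so the
# waiting times come out [0, 5, 2, 10] and the turnaround times [2, 10, 5, 17].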
if __name__ == "__main__":
print('[TEST CASE 01]')
lowerCamelCase : List[Any] = 4
lowerCamelCase : List[str] = [2, 5, 3, 7]
lowerCamelCase : Tuple = [0, 0, 0, 0]
lowerCamelCase : Any = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase : List[Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 684 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2 we had n_vocab=80 characters; in v3 we missed "+" and so n_vocab=79.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : int = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """table-transformer"""
_snake_case = ["""past_key_values"""]
_snake_case = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_0_0 , A=6 , A=2_0_4_8 , A=8 , A=6 , A=2_0_4_8 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_5_6 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Union[str, Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case : Any = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(A , A ):
snake_case : int = backbone_config.get("""model_type""" )
snake_case : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
snake_case : Tuple = config_class.from_dict(A )
# set timm attributes to None
snake_case , snake_case , snake_case : Tuple = None, None, None
snake_case : Any = use_timm_backbone
snake_case : int = backbone_config
snake_case : str = num_channels
snake_case : List[Any] = num_queries
snake_case : Union[str, Any] = d_model
snake_case : List[str] = encoder_ffn_dim
snake_case : Tuple = encoder_layers
snake_case : Tuple = encoder_attention_heads
snake_case : Union[str, Any] = decoder_ffn_dim
snake_case : int = decoder_layers
snake_case : List[str] = decoder_attention_heads
snake_case : Dict = dropout
snake_case : str = attention_dropout
snake_case : Optional[int] = activation_dropout
snake_case : int = activation_function
snake_case : Tuple = init_std
snake_case : Dict = init_xavier_std
snake_case : Tuple = encoder_layerdrop
snake_case : Optional[int] = decoder_layerdrop
snake_case : Tuple = encoder_layers
snake_case : Dict = auxiliary_loss
snake_case : Tuple = position_embedding_type
snake_case : Optional[Any] = backbone
snake_case : Any = use_pretrained_backbone
snake_case : List[str] = dilation
# Hungarian matcher
snake_case : str = class_cost
snake_case : List[Any] = bbox_cost
snake_case : Optional[int] = giou_cost
# Loss coefficients
snake_case : Union[str, Any] = mask_loss_coefficient
snake_case : Optional[int] = dice_loss_coefficient
snake_case : Dict = bbox_loss_coefficient
snake_case : List[Any] = giou_loss_coefficient
snake_case : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def UpperCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self ) -> int:
return self.d_model
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = version.parse("""1.11""" )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCAmelCase ( self ) -> float:
return 1e-5
@property
def UpperCAmelCase ( self ) -> int:
return 1_2
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase : Optional[Any] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase : Union[str, Any] = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase : List[str] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> tuple[str, float]:
snake_case : List[Any] = len([g for position, g in enumerate(lowercase ) if g == main_target[position]] )
return (item, float(lowercase ))
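# Example (added for illustration): evaluating "Helxo Worlx" against the target
# "Hello World" scores 9.0 -- one point for each of the nine positions whose
# character already matches the target string.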
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> tuple[str, str]:
snake_case : Union[str, Any] = random.randint(0 ,len(lowercase ) - 1 )
snake_case : Union[str, Any] = parent_a[:random_slice] + parent_a[random_slice:]
snake_case : str = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> str:
snake_case : Union[str, Any] = list(lowercase )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
snake_case : Any = random.choice(lowercase )
return "".join(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,) -> list[str]:
snake_case : Tuple = []
# Generate more children proportionally to the fitness score.
snake_case : Optional[int] = int(parent_a[1] * 100 ) + 1
snake_case : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase ):
snake_case : str = population_score[random.randint(0 ,lowercase )][0]
snake_case , snake_case : int = crossover(parent_a[0] ,lowercase )
# Append new string to the population list.
pop.append(mutate(lowercase ,lowercase ) )
pop.append(mutate(lowercase ,lowercase ) )
return pop
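# Note (added for illustration): fitness scores are normalized to [0, 1] before
# selection, so a parent with score 0.5 requests int(0.5 * 100) + 1 = 51
# children, capped at 10 per call -- fitter strings therefore contribute
# proportionally more offspring to the next generation.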
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = True ) -> tuple[int, int, str]:
# Verify that N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
snake_case : int = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(lowercase )
# Verify that the target contains no genes besides the ones inside genes variable.
snake_case : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
snake_case : List[Any] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(lowercase )
# Generate random starting population.
snake_case : Any = []
for _ in range(lowercase ):
population.append("""""".join([random.choice(lowercase ) for i in range(len(lowercase ) )] ) )
# Just some logs to know what the algorithms is doing.
snake_case , snake_case : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
snake_case : Tuple = [evaluate(lowercase ,lowercase ) for item in population]
# Check if there is a matching evolution.
snake_case : int = sorted(lowercase ,key=lambda lowercase : x[1] ,reverse=lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations,
# just to show that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
snake_case : Any = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase )
# Normalize population score to be between 0 and 1.
snake_case : int = [
(item, score / len(lowercase )) for item, score in population_score
]
# This is selection
for i in range(lowercase ):
population.extend(select(population_score[int(lowercase )] ,lowercase ,lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase : Optional[int] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
lowerCamelCase : List[Any] = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 684 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """speech_to_text"""
_snake_case = ["""past_key_values"""]
_snake_case = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , A=1_0_0_0_0 , A=1_2 , A=2_0_4_8 , A=4 , A=6 , A=2_0_4_8 , A=4 , A=0.0 , A=0.0 , A=True , A=True , A="relu" , A=2_5_6 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=2 , A=True , A=1 , A=0 , A=2 , A=6_0_0_0 , A=1_0_2_4 , A=2 , A=(5, 5) , A=1_0_2_4 , A=8_0 , A=1 , **A , ) -> str:
snake_case : List[str] = vocab_size
snake_case : Optional[Any] = d_model
snake_case : str = encoder_ffn_dim
snake_case : Optional[Any] = encoder_layers
snake_case : Any = encoder_attention_heads
snake_case : List[Any] = decoder_ffn_dim
snake_case : Any = decoder_layers
snake_case : Tuple = decoder_attention_heads
snake_case : Dict = dropout
snake_case : Optional[int] = attention_dropout
snake_case : Optional[int] = activation_dropout
snake_case : str = activation_function
snake_case : int = init_std
snake_case : str = encoder_layerdrop
snake_case : Optional[Any] = decoder_layerdrop
snake_case : Any = use_cache
snake_case : int = encoder_layers
snake_case : int = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case : str = max_source_positions
snake_case : Optional[int] = max_target_positions
snake_case : List[str] = num_conv_layers
snake_case : List[str] = list(A )
snake_case : Dict = conv_channels
snake_case : Optional[int] = input_feat_per_channel
snake_case : Union[str, Any] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , **A , )
| 684 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 1 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = 1
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : int = []
snake_case : List[str] = []
for i in range(self.num_layers ):
snake_case : int = self.in_channels if i == 0 else self.out_channels
snake_case : str = FlaxResnetBlockaD(
in_channels=A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(A )
snake_case : Tuple = resnets
snake_case : Tuple = attentions
if self.add_downsample:
snake_case : List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A , A=True ) -> List[Any]:
snake_case : int = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case : Union[str, Any] = resnet(A , A , deterministic=A )
snake_case : Tuple = attn(A , A , deterministic=A )
output_states += (hidden_states,)
if self.add_downsample:
snake_case : int = self.downsamplers_a(A )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = True
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[Any] = []
for i in range(self.num_layers ):
snake_case : List[Any] = self.in_channels if i == 0 else self.out_channels
snake_case : Union[str, Any] = FlaxResnetBlockaD(
in_channels=A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : List[Any] = resnets
if self.add_downsample:
snake_case : List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A=True ) -> Any:
snake_case : List[Any] = ()
for resnet in self.resnets:
snake_case : Any = resnet(A , A , deterministic=A )
output_states += (hidden_states,)
if self.add_downsample:
snake_case : Dict = self.downsamplers_a(A )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = 1
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Any:
snake_case : int = []
snake_case : List[Any] = []
for i in range(self.num_layers ):
snake_case : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case : Tuple = self.prev_output_channel if i == 0 else self.out_channels
snake_case : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : List[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(A )
snake_case : int = resnets
snake_case : int = attentions
if self.add_upsample:
snake_case : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A , A , A=True ) -> int:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case : Dict = res_hidden_states_tuple[-1]
snake_case : Optional[Any] = res_hidden_states_tuple[:-1]
snake_case : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case : Dict = resnet(A , A , deterministic=A )
snake_case : Union[str, Any] = attn(A , A , deterministic=A )
if self.add_upsample:
snake_case : Dict = self.upsamplers_a(A )
return hidden_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = True
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Optional[Any] = []
for i in range(self.num_layers ):
snake_case : Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case : Tuple = self.prev_output_channel if i == 0 else self.out_channels
snake_case : Union[str, Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : int = resnets
if self.add_upsample:
snake_case : Any = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A , A=True ) -> str:
for resnet in self.resnets:
# pop res hidden states
snake_case : List[Any] = res_hidden_states_tuple[-1]
snake_case : Optional[int] = res_hidden_states_tuple[:-1]
snake_case : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case : Union[str, Any] = resnet(A , A , deterministic=A )
if self.add_upsample:
snake_case : Any = self.upsamplers_a(A )
return hidden_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = 1
_snake_case = False
_snake_case = False
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Union[str, Any]:
# there is always at least one resnet
snake_case : Tuple = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case : Dict = []
for _ in range(self.num_layers ):
snake_case : str = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(A )
snake_case : str = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : Optional[Any] = resnets
snake_case : str = attentions
def __call__( self , A , A , A , A=True ) -> Dict:
snake_case : Union[str, Any] = self.resnets[0](A , A )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case : Any = attn(A , A , deterministic=A )
snake_case : Dict = resnet(A , A , deterministic=A )
return hidden_states
| 684 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
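# Illustrative behavior implied by build_inputs_with_special_tokens above: a
# single sequence only gains a trailing EOS, e.g. [5, 6, 7] -> [5, 6, 7, eos_token_id],
# while a pair is concatenated with one shared trailing EOS. The special-tokens
# mask mirrors this: all zeros except a single 1 marking that final EOS.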
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = False ) -> str:
if not isinstance(lowercase ,lowercase ):
snake_case : str = f"""Expected string as input, found {type(lowercase )}"""
raise ValueError(lowercase )
if not isinstance(lowercase ,lowercase ):
snake_case : Dict = f"""Expected boolean as use_pascal parameter, found {type(lowercase )}"""
raise ValueError(lowercase )
snake_case : int = input_str.split("""_""" )
snake_case : Tuple = 0 if use_pascal else 1
snake_case : Union[str, Any] = words[start_index:]
snake_case : List[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
snake_case : Tuple = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
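# Example results implied by the implementation above (underscores split the
# words; the first word stays lowercase unless use_pascal is True):
#
#   SCREAMING_SNAKE_CASE__("some_random_string")        # -> 'someRandomString'
#   SCREAMING_SNAKE_CASE__("some_random_string", True)  # -> 'SomeRandomString'
#
# Caveat: consecutive underscores produce empty words, so word[0] would raise
# an IndexError on inputs such as "foo__bar".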
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
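# Minimal usage sketch (upstream this class is GPTNeoXJapaneseConfig; shown
# here only as a container for the hyperparameters assigned above):
#
#   cfg = GPTNeoXJapaneseConfig()   # defaults: 32 layers, hidden_size 2560
#   cfg.rotary_emb_base             # -> 10000, consumed by the rotary embeddings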
| 684 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowercase :
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( *A , **A ) -> List[str]:
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
_snake_case = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase ( self , A , A , A ) -> str:
snake_case : List[str] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
snake_case : str = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def UpperCAmelCase ( self , A , A ) -> int:
snake_case : Dict = vqa_pipeline(A , top_k=1 )
self.assertEqual(
A , [
[{"""score""": ANY(A ), """answer""": ANY(A )}],
[{"""score""": ANY(A ), """answer""": ANY(A )}],
] , )
@require_torch
def UpperCAmelCase ( self ) -> str:
snake_case : Union[str, Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
snake_case : List[str] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case : Union[str, Any] = """How many cats are there?"""
snake_case : Union[str, Any] = vqa_pipeline(image=A , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
A , [{"""score""": ANY(A ), """answer""": ANY(A )}, {"""score""": ANY(A ), """answer""": ANY(A )}] )
snake_case : Tuple = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
A , [{"""score""": ANY(A ), """answer""": ANY(A )}, {"""score""": ANY(A ), """answer""": ANY(A )}] )
@slow
@require_torch
def UpperCAmelCase ( self ) -> int:
snake_case : Dict = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
snake_case : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case : Optional[int] = """How many cats are there?"""
snake_case : Tuple = vqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
snake_case : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
snake_case : Any = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def UpperCAmelCase ( self ) -> Any:
pass
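# Condensed invocation pattern exercised by the slow test above (model name and
# scores taken from the test body):
#
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)
#   # -> [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]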
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
    snake_case : Dict = """"""
    if int_num == 0:
        # without this guard the loop below never runs and int("") would raise
        return 0
    while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
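# Worked examples following the logic above: "AC" parses to 172, whose bits
# "10101100" are returned re-read as the base-10 integer 10101100; "-1a" gives
# -11010. The binary digits are therefore encoded in a decimal int, not a string.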
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=3_0 , A=2 , A=3 , A=True , A=True , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=1_0 , A=0.02 , A=3 , A=None , A=2 , ) -> Union[str, Any]:
snake_case : Tuple = parent
snake_case : str = batch_size
snake_case : Union[str, Any] = image_size
snake_case : Union[str, Any] = patch_size
snake_case : Dict = num_channels
snake_case : Union[str, Any] = is_training
snake_case : List[Any] = use_labels
snake_case : int = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : List[str] = num_attention_heads
snake_case : Optional[Any] = intermediate_size
snake_case : Optional[Any] = hidden_act
snake_case : Optional[int] = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : List[Any] = type_sequence_label_size
snake_case : List[str] = initializer_range
snake_case : Tuple = scope
snake_case : List[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
snake_case : Optional[Any] = (image_size // patch_size) ** 2
snake_case : int = num_patches + 2
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Optional[int] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : str = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> List[str]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase ( self , A , A , A ) -> List[str]:
snake_case : Optional[int] = DeiTModel(config=A )
model.to(A )
model.eval()
snake_case : Dict = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A ) -> Optional[int]:
snake_case : List[str] = DeiTForMaskedImageModeling(config=A )
model.to(A )
model.eval()
snake_case : Optional[int] = model(A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case : Dict = 1
snake_case : int = DeiTForMaskedImageModeling(A )
model.to(A )
model.eval()
snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case : Any = model(A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self , A , A , A ) -> int:
snake_case : Tuple = self.type_sequence_label_size
snake_case : Optional[int] = DeiTForImageClassification(A )
model.to(A )
model.eval()
snake_case : Optional[Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case : Union[str, Any] = 1
snake_case : List[str] = DeiTForImageClassification(A )
model.to(A )
model.eval()
snake_case : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case : Optional[int] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self ) -> str:
snake_case : Optional[int] = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case : Tuple = config_and_inputs
snake_case : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Tuple = DeiTModelTester(self )
snake_case : Tuple = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 )
def UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> List[str]:
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Any = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case , snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Dict = model_class(A )
snake_case : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Dict = [*signature.parameters.keys()]
snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> str:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def UpperCAmelCase ( self ) -> Dict:
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase ( self , A , A , A=False ) -> List[Any]:
snake_case : Union[str, Any] = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
snake_case : Optional[Any] = model_class(A )
model.to(A )
model.train()
snake_case : List[Any] = self._prepare_for_class(A , A , return_labels=A )
snake_case : int = model(**A ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Any:
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case : List[Any] = False
snake_case : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
snake_case : Union[str, Any] = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
snake_case : Tuple = self._prepare_for_class(A , A , return_labels=A )
snake_case : List[Any] = model(**A ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Dict:
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : str = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A ),
*get_values(A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
                with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
snake_case : Dict = problem_type["""title"""]
snake_case : Dict = problem_type["""num_labels"""]
snake_case : Dict = model_class(A )
model.to(A )
model.train()
snake_case : Any = self._prepare_for_class(A , A , return_labels=A )
if problem_type["num_labels"] > 1:
snake_case : Any = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
snake_case : List[str] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=A ) as warning_list:
snake_case : Optional[int] = model(**A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def UpperCAmelCase ( self ) -> Dict:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Dict = DeiTModel.from_pretrained(A )
self.assertIsNotNone(A )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Dict = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
A )
snake_case : int = self.default_image_processor
snake_case : Dict = prepare_img()
snake_case : Optional[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
snake_case : int = model(**A )
# verify the logits
snake_case : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
snake_case : Any = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Dict = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
snake_case : Optional[Any] = self.default_image_processor
snake_case : Tuple = prepare_img()
snake_case : str = image_processor(images=A , return_tensors="""pt""" )
snake_case : str = inputs.pixel_values.to(A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case : List[Any] = model(A )
| 684 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
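# A compressed numpy sketch (assuming the 256/224 defaults set in __init__) of
# the order preprocess applies: resize -> center crop -> rescale -> normalize.
# IMAGENET_STANDARD_MEAN/STD are 0.5 per channel, so the last two steps are:
import numpy as np

image = np.random.randint(0, 256, (3, 224, 224)).astype(np.float32)
rescaled = image * (1 / 255)          # do_rescale with rescale_factor=1/255
normalized = (rescaled - 0.5) / 0.5   # do_normalize -> values roughly in [-1, 1]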
| 684 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCamelCase : str = random.Random()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=1.0 ,lowercase=None ,lowercase=None ) -> Any:
if rng is None:
snake_case : Optional[int] = global_rng
snake_case : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=4_0_0 , A=2_0_0_0 , A=1_0 , A=1_6_0 , A=8 , A=0.0 , A=4_0_0_0 , A=False , A=True , ) -> List[str]:
snake_case : Dict = parent
snake_case : Any = batch_size
snake_case : Union[str, Any] = min_seq_length
snake_case : List[str] = max_seq_length
snake_case : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case : List[str] = padding_value
snake_case : Optional[int] = sampling_rate
snake_case : Dict = return_attention_mask
snake_case : Any = do_normalize
snake_case : Tuple = feature_size
snake_case : int = chunk_length
snake_case : Union[str, Any] = hop_length
def UpperCAmelCase ( self ) -> Optional[int]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase ( self , A=False , A=False ) -> Any:
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
snake_case : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case : List[str] = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase ( self ) -> int:
snake_case : Any = WhisperFeatureExtractionTester(self )
def UpperCAmelCase ( self ) -> Dict:
snake_case : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
snake_case : str = self.feature_extraction_class.from_pretrained(A )
snake_case : Any = feat_extract_first.to_dict()
snake_case : List[str] = feat_extract_second.to_dict()
snake_case : List[Any] = feat_extract_first.mel_filters
snake_case : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Optional[Any] = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
snake_case : List[str] = self.feature_extraction_class.from_json_file(A )
snake_case : int = feat_extract_first.to_dict()
snake_case : Tuple = feat_extract_second.to_dict()
snake_case : str = feat_extract_first.mel_filters
snake_case : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase ( self ) -> Optional[Any]:
        # Tests that all calls wrap encode_plus and batch_encode_plus
snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Optional[Any] = [np.asarray(A ) for speech_input in speech_inputs]
# Test feature size
snake_case : str = feature_extractor(A , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
snake_case : int = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
snake_case : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
snake_case : List[str] = feature_extractor(A , return_tensors="""np""" ).input_features
snake_case : List[str] = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case : List[str] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case : Any = np.asarray(A )
snake_case : List[str] = feature_extractor(A , return_tensors="""np""" ).input_features
snake_case : Optional[Any] = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test truncation required
snake_case : int = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
snake_case : Optional[Any] = [np.asarray(A ) for speech_input in speech_inputs]
snake_case : Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
snake_case : Any = [np.asarray(A ) for speech_input in speech_inputs_truncated]
snake_case : Dict = feature_extractor(A , return_tensors="""np""" ).input_features
snake_case : List[Any] = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def UpperCAmelCase ( self ) -> str:
import torch
snake_case : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Optional[int] = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
snake_case : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
snake_case : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
snake_case : Any = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase ( self ) -> Tuple:
# fmt: off
snake_case : Tuple = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
snake_case : int = self._load_datasamples(1 )
snake_case : Optional[Any] = WhisperFeatureExtractor()
snake_case : List[str] = feature_extractor(A , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , A , atol=1e-4 ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : int = self._load_datasamples(1 )[0]
snake_case : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
snake_case : Dict = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A )[0]
self.assertTrue(np.all(np.mean(A ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A ) - 1 ) < 1e-3 ) )
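# The normalization asserted in the final test above amounts to the following
# (epsilon assumed, matching the usual zero-mean unit-variance implementation):
import numpy as np

audio = np.random.rand(16_000).astype(np.float32) * 65_535
normed = (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3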
| 684 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = KandinskyInpaintPipeline
_snake_case = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_snake_case = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_snake_case = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_snake_case = False
@property
def UpperCAmelCase ( self ) -> str:
return 3_2
@property
def UpperCAmelCase ( self ) -> List[str]:
return 3_2
@property
def UpperCAmelCase ( self ) -> str:
return self.time_input_dim
@property
def UpperCAmelCase ( self ) -> Dict:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self ) -> str:
return 1_0_0
@property
def UpperCAmelCase ( self ) -> List[str]:
snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
snake_case : List[str] = MultilingualCLIP(A )
snake_case : Optional[int] = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case : Tuple = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case : Union[str, Any] = UNetaDConditionModel(**A )
return model
@property
def UpperCAmelCase ( self ) -> str:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
snake_case : str = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : List[str] = self.dummy_text_encoder
snake_case : List[Any] = self.dummy_tokenizer
snake_case : Any = self.dummy_unet
snake_case : List[Any] = self.dummy_movq
snake_case : Optional[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A , set_alpha_to_one=A , steps_offset=1 , prediction_type="""epsilon""" , thresholding=A , )
snake_case : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase ( self , A , A=0 ) -> Any:
snake_case : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A ) ).to(A )
snake_case : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A )
# create init_image
snake_case : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(A ) ).to(A )
snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : List[Any] = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create mask
snake_case : Optional[int] = np.ones((6_4, 6_4) , dtype=np.floataa )
snake_case : Dict = 0
if str(A ).startswith("""mps""" ):
snake_case : List[Any] = torch.manual_seed(A )
else:
snake_case : List[Any] = torch.Generator(device=A ).manual_seed(A )
snake_case : List[str] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self ) -> int:
snake_case : Any = """cpu"""
snake_case : Optional[Any] = self.get_dummy_components()
snake_case : List[str] = self.pipeline_class(**A )
snake_case : Optional[int] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
snake_case : str = pipe(**self.get_dummy_inputs(A ) )
snake_case : Dict = output.images
snake_case : Optional[int] = pipe(
**self.get_dummy_inputs(A ) , return_dict=A , )[0]
snake_case : Optional[Any] = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
snake_case : Any = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
snake_case : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case : List[str] = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
snake_case : List[str] = 0
snake_case : int = """a hat"""
snake_case : str = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(A )
snake_case : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
snake_case : List[Any] = pipeline.to(A )
pipeline.set_progress_bar_config(disable=A )
snake_case : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case : Dict = pipe_prior(
A , generator=A , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case : Optional[Any] = pipeline(
A , image=A , mask_image=A , image_embeds=A , negative_image_embeds=A , generator=A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="""np""" , )
snake_case : str = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(A , A )
| 684 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
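# Note on the assertions above: find_labels inspects the signature of a model
# class's forward (PyTorch) or call (TF) method and returns the parameter names
# containing "label"; Flax models accept no label arguments, hence the empty
# lists in the Flax test.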
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """xmod"""
def __init__( self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1e-1_2 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , A=False , A=2 , A=False , A=True , A=True , A=("en_XX",) , A=None , **A , ) -> str:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
snake_case : str = vocab_size
snake_case : List[Any] = hidden_size
snake_case : str = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : List[str] = hidden_act
snake_case : str = intermediate_size
snake_case : List[str] = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : Optional[int] = max_position_embeddings
snake_case : Dict = type_vocab_size
snake_case : str = initializer_range
snake_case : List[str] = layer_norm_eps
snake_case : str = position_embedding_type
snake_case : List[Any] = use_cache
snake_case : Dict = classifier_dropout
snake_case : Union[str, Any] = pre_norm
snake_case : Union[str, Any] = adapter_reduction_factor
snake_case : Optional[int] = adapter_layer_norm
snake_case : str = adapter_reuse_layer_norm
snake_case : Tuple = ln_before_adapter
snake_case : List[Any] = list(A )
snake_case : Tuple = default_language
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
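# For the default (non-multiple-choice) task, the mapping built above resolves to
#   {"input_ids": {0: "batch", 1: "sequence"},
#    "attention_mask": {0: "batch", 1: "sequence"}}
# i.e. the axes the ONNX export should treat as dynamic.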
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 1 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCamelCase : List[Any] = 'scheduler_config.json'
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 1
_snake_case = 2
_snake_case = 3
_snake_case = 4
_snake_case = 5
_snake_case = 6
_snake_case = 7
_snake_case = 8
_snake_case = 9
_snake_case = 10
_snake_case = 11
_snake_case = 12
_snake_case = 13
_snake_case = 14
@dataclass
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 42
class __lowercase :
"""simple docstring"""
_snake_case = SCHEDULER_CONFIG_NAME
_snake_case = []
_snake_case = True
@classmethod
def UpperCAmelCase ( cls , A = None , A = None , A=False , **A , ) -> List[Any]:
snake_case , snake_case , snake_case : Optional[int] = cls.load_config(
pretrained_model_name_or_path=A , subfolder=A , return_unused_kwargs=A , return_commit_hash=A , **A , )
return cls.from_config(A , return_unused_kwargs=A , **A )
def UpperCAmelCase ( self , A , A = False , **A ) -> Union[str, Any]:
self.save_config(save_directory=A , push_to_hub=A , **A )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls ) -> Tuple:
snake_case : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case : Optional[Any] = importlib.import_module(__name__.split(""".""" )[0] )
snake_case : int = [
getattr(A , A ) for c in compatible_classes_str if hasattr(A , A )
]
return compatible_classes
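# Sketch of what _get_compatibles yields for a concrete scheduler: every name
# in its _compatibles list (plus the class's own name) that is actually exported
# from the top-level diffusers module, roughly
#   [getattr(diffusers, n) for n in names if hasattr(diffusers, n)]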
| 684 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
| 684 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = VQModel
_snake_case = """sample"""
@property
def UpperCAmelCase ( self , A=(3_2, 3_2) ) -> Any:
snake_case : Dict = 4
snake_case : str = 3
snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(A )
return {"sample": image}
@property
def UpperCAmelCase ( self ) -> Dict:
return (3, 3_2, 3_2)
@property
def UpperCAmelCase ( self ) -> Any:
return (3, 3_2, 3_2)
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Tuple = {
"""block_out_channels""": [3_2, 6_4],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
snake_case : List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> Optional[int]:
pass
def UpperCAmelCase ( self ) -> Dict:
snake_case , snake_case : Optional[int] = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=A )
self.assertIsNotNone(A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(A )
snake_case : Optional[int] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Tuple = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(A ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
snake_case : List[str] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
snake_case : Union[str, Any] = image.to(A )
with torch.no_grad():
snake_case : Dict = model(A ).sample
snake_case : Any = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case : List[str] = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
| 684 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
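    # Strip comments and blank lines, then hash the remaining source so the
    # module can be fingerprinted for caching.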
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 1 |
from itertools import permutations
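# Project Euler 43: sum every 0-9 pandigital number whose consecutive 3-digit
# substrings are divisible by 2, 3, 5, 7, 11, 13 and 17 respectively.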
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
snake_case : Dict = [7, 11, 13, 17]
for i, test in enumerate(lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( lowercase = 10 ) -> int:
return sum(
int("""""".join(map(lowercase ,lowercase ) ) )
for num in permutations(range(lowercase ) )
if is_substring_divisible(lowercase ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """naver-clova-ix/donut-base-finetuned-docvqa"""
_snake_case = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
_snake_case = """document_qa"""
_snake_case = AutoProcessor
_snake_case = VisionEncoderDecoderModel
_snake_case = ["""image""", """text"""]
_snake_case = ["""text"""]
def __init__( self , *A , **A ) -> Dict:
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*A , **A )
def UpperCAmelCase ( self , A , A ) -> Optional[Any]:
snake_case : int = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
snake_case : Any = task_prompt.replace("""{user_input}""" , A )
snake_case : List[Any] = self.pre_processor.tokenizer(
A , add_special_tokens=A , return_tensors="""pt""" ).input_ids
snake_case : Union[str, Any] = self.pre_processor(A , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCAmelCase ( self , A ) -> Optional[Any]:
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=A , ).sequences
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = self.pre_processor.batch_decode(A )[0]
snake_case : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
snake_case : Tuple = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
snake_case : List[Any] = re.sub(r"""<.*?>""" , """""" , A , count=1 ).strip() # remove first task start token
snake_case : Any = self.pre_processor.tokenajson(A )
return sequence["answer"]
| 684 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Union[str, Any]:
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
snake_case : Any = (boundary[1] - boundary[0]) / steps
snake_case : Optional[Any] = boundary[0]
snake_case : Dict = boundary[1]
snake_case : List[str] = make_points(lowercase ,lowercase ,lowercase )
snake_case : Any = 0.0
y += (h / 2.0) * f(lowercase )
for i in x_i:
# print(i)
y += h * f(lowercase )
y += (h / 2.0) * f(lowercase )
return y
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[Any]:
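    # Yield the interior grid points a+h, a+2h, ..., stopping just before b - h.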
snake_case : Union[str, Any] = a + h
while x < (b - h):
yield x
snake_case : str = x + h
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]: # enter your function here
snake_case : Any = (x - 0) * (x - 0)
return y
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
snake_case : Optional[Any] = 0.0 # Lower bound of integration
snake_case : List[str] = 1.0 # Upper bound of integration
snake_case : List[str] = 10.0 # define number of steps or resolution
snake_case : List[Any] = [a, b] # define boundary of integration
snake_case : Optional[Any] = method_a(lowercase ,lowercase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 684 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
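    # Sample random candidates until one survives the quick non-generator checks below.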
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 1 |
import os
from collections.abc import Iterator
def SCREAMING_SNAKE_CASE__ ( lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(lowercase ):
snake_case : Any = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(lowercase ,lowercase ).lstrip("""./""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
return f"""{i * " "}*""" if i else "\n##"
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> str:
snake_case : Union[str, Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowercase ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(lowercase )} {new_part.replace("_" ," " ).title()}""" )
return new_path
def SCREAMING_SNAKE_CASE__ ( lowercase = "." ) -> None:
snake_case : str = """"""
for filepath in sorted(good_file_paths(lowercase ) ):
snake_case , snake_case : Optional[Any] = os.path.split(lowercase )
if filepath != old_path:
snake_case : Any = print_path(lowercase ,lowercase )
snake_case : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
snake_case : List[str] = f"""{filepath}/{filename}""".replace(""" """ ,"""%20""" )
snake_case : Optional[int] = os.path.splitext(filename.replace("""_""" ,""" """ ).title() )[0]
print(f"""{md_prefix(lowercase )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
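    # Modular exponentiation by repeated squaring: O(log exponent) multiplications.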
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
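    # Project Euler 188: last `digits` digits of the hyperexponentiation (power
    # tower) 1777 tetrated 1855 times, built by iterating modular exponentiation.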
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A = "▁" , A = True , A = "<unk>" , A = "</s>" , A = "<pad>" , ) -> str:
snake_case : Optional[int] = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
snake_case : List[str] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
snake_case : Union[str, Any] = token_dict["""token"""]
snake_case : Any = Tokenizer(Unigram() )
snake_case : List[str] = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
normalizers.Lowercase(),
] )
snake_case : str = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=A , add_prefix_space=A ),
pre_tokenizers.Digits(individual_digits=A ),
pre_tokenizers.Punctuation(),
] )
snake_case : Tuple = decoders.Metaspace(replacement=A , add_prefix_space=A )
snake_case : int = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
snake_case : Optional[Any] = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(A , A )
def UpperCAmelCase ( self , A , A = 8_0_0_0 , A = True , ) -> Optional[Any]:
snake_case : Dict = trainers.UnigramTrainer(
vocab_size=A , special_tokens=self.special_tokens_list , show_progress=A , )
if isinstance(A , A ):
snake_case : List[str] = [files]
self._tokenizer.train(A , trainer=A )
self.add_unk_id()
def UpperCAmelCase ( self , A , A = 8_0_0_0 , A = True , ) -> List[str]:
snake_case : Optional[int] = trainers.UnigramTrainer(
vocab_size=A , special_tokens=self.special_tokens_list , show_progress=A , )
self._tokenizer.train_from_iterator(A , trainer=A )
self.add_unk_id()
def UpperCAmelCase ( self ) -> int:
snake_case : Any = json.loads(self._tokenizer.to_str() )
snake_case : List[Any] = self.special_tokens["""unk"""]["""id"""]
snake_case : Union[str, Any] = Tokenizer.from_str(json.dumps(A ) )
| 684 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
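    # Count, for every reachable total, how many ordered rolls of dice_number
    # dice with sides_number faces produce it.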
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
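    # Project Euler 205: probability that Peter (nine 4-sided dice) rolls a
    # strictly higher total than Colin (six 6-sided dice), rounded to 7 places.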
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model'}
lowerCamelCase : str = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
lowerCamelCase : List[str] = {
'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A=False , A=False , A=False , A=None , A=None , A=None , A=None , A = None , **A , ) -> None:
snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case : int = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
snake_case : Any = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
snake_case : Tuple = """<|endoftext|>""" if eos_token is None else eos_token
snake_case : List[Any] = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
snake_case : List[str] = unk_token if pad_token is None else pad_token
snake_case : Tuple = eos_token if bos_token is None else bos_token
else:
snake_case : Optional[int] = """<pad>""" if pad_token is None else pad_token
snake_case : str = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Dict = do_lower_case
snake_case : List[str] = remove_space
snake_case : Union[str, Any] = keep_accents
snake_case : List[str] = vocab_file
snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
# Used for whitespace normalization in input texts
        # fmt: off
snake_case : Dict = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
snake_case : Union[str, Any] = re.compile(
f"""[{"".join(map(A , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Any:
snake_case : Any = self.__dict__.copy()
snake_case : Dict = None
return state
def __setstate__( self , A ) -> List[str]:
snake_case : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : str = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase ( self ) -> int:
return len(self.sp_model )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Union[str, Any] = self.non_printing_characters_re.sub("""""" , A )
# Normalize whitespaces
snake_case : int = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
snake_case : str = unicodedata.normalize("""NFC""" , A )
return text
def UpperCAmelCase ( self , A , **A ) -> List[str]:
snake_case : Union[str, Any] = self.preprocess_text(A )
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> int:
return self.sp_model.PieceToId(A )
def UpperCAmelCase ( self , A ) -> str:
return self.sp_model.IdToPiece(A )
@staticmethod
def UpperCAmelCase ( A ) -> str:
return out_string
def UpperCAmelCase ( self , A ) -> str:
snake_case : int = []
snake_case : str = """"""
snake_case : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
snake_case : str = True
snake_case : Dict = []
else:
current_sub_tokens.append(A )
snake_case : Any = False
out_string += self.sp_model.decode(A )
return out_string
def UpperCAmelCase ( self ) -> Dict[str, int]:
snake_case : int = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : List[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Any = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCAmelCase ( self , A , A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(A , A ):
snake_case : int = self.preprocess_text(A )
snake_case : str = self.sp_model.encode(A )
else:
snake_case : Optional[int] = [self.preprocess_text(A ) for t in text]
snake_case : Any = self.sp_model.encode(A )
if return_tensors is True or return_tensors == "pt":
snake_case : Any = torch.tensor(A )
return token_ids
def UpperCAmelCase ( self , A ) -> str:
return self.sp_model.decode(A )
def UpperCAmelCase ( self , A ) -> List[int]:
snake_case : List[Any] = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
snake_case : int = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=A )
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=3 , A=1_8 , A=3_0 , A=4_0_0 , A=True , A=None , A=True , A=None , A=True , ) -> Any:
snake_case : Tuple = size if size is not None else {"""shortest_edge""": 2_0}
snake_case : List[str] = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case : Optional[Any] = parent
snake_case : int = batch_size
snake_case : str = num_channels
snake_case : Optional[int] = image_size
snake_case : Dict = min_resolution
snake_case : Union[str, Any] = max_resolution
snake_case : int = do_resize
snake_case : Any = size
snake_case : Dict = do_center_crop
snake_case : Any = crop_size
snake_case : List[str] = do_flip_channel_order
def UpperCAmelCase ( self ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = MobileViTImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ) -> str:
snake_case : Any = MobileViTImageProcessingTester(self )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ) -> Dict:
snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """do_resize""" ) )
self.assertTrue(hasattr(A , """size""" ) )
self.assertTrue(hasattr(A , """do_center_crop""" ) )
self.assertTrue(hasattr(A , """center_crop""" ) )
self.assertTrue(hasattr(A , """do_flip_channel_order""" ) )
def UpperCAmelCase ( self ) -> Dict:
snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 2_0} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCAmelCase ( self ) -> List[str]:
pass
def UpperCAmelCase ( self ) -> Any:
# Initialize image_processing
snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case : int = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase ( self ) -> List[str]:
# Initialize image_processing
snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case : Optional[Any] = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
snake_case : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case : Optional[int] = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 684 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
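    # Project Euler 11: greatest product of four adjacent numbers in the 20x20
    # grid, checking rows, columns and both diagonal directions.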
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase : Tuple = getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 8 ,lowercase = 1024 ,lowercase="val" ,lowercase=None ,lowercase=False ,lowercase="summarization" ,lowercase=None ,lowercase=1 ,lowercase = None ,lowercase="" ,**lowercase ,) -> Dict:
snake_case : Union[str, Any] = str(lowercase )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" ,rank=lowercase )
snake_case : Union[str, Any] = Path(lowercase )
snake_case : Tuple = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(lowercase )
snake_case : str = AutoModelForSeqaSeqLM.from_pretrained(lowercase ).cuda()
if fpaa:
snake_case : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowercase ,lowercase ) # update config with task specific params
snake_case : Optional[Any] = generate_kwargs.pop("""num_beams""" ,model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
snake_case : Union[str, Any] = num_return_sequences
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
snake_case : Dict = tokenizer.model_max_length
if prefix is None:
snake_case : str = prefix or getattr(model.config ,"""prefix""" ,"""""" ) or """"""
snake_case : Union[str, Any] = SeqaSeqDataset(
lowercase ,lowercase ,lowercase ,max_target_length=1024 ,type_path=lowercase ,n_obs=lowercase ,prefix=lowercase ,**lowercase ,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
snake_case : Dict = ds.make_sortish_sampler(lowercase ,distributed=lowercase ,add_extra_examples=lowercase ,shuffle=lowercase )
snake_case : Union[str, Any] = DataLoader(lowercase ,sampler=lowercase ,batch_size=lowercase ,collate_fn=ds.collate_fn )
snake_case : Optional[int] = []
for batch in tqdm(lowercase ):
snake_case : int = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) ,attention_mask=batch["""attention_mask"""].to(model.device ) ,num_return_sequences=lowercase ,num_beams=lowercase ,**lowercase ,)
snake_case : List[Any] = tokenizer.batch_decode(lowercase ,skip_special_tokens=lowercase ,clean_up_tokenization_spaces=lowercase )
snake_case : List[str] = batch["""ids"""]
if num_return_sequences > 1:
snake_case : Union[str, Any] = chunks(lowercase ,lowercase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowercase ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(lowercase ,lowercase )
return results, sampler.num_replicas
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
snake_case : Optional[int] = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" ,type=lowercase ,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" ,type=lowercase ,help="""like facebook/bart-large-cnn,t5-base, etc.""" ,default="""sshleifer/distilbart-xsum-12-3""" ,)
parser.add_argument("""--save_dir""" ,type=lowercase ,help="""where to save""" ,default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" ,type=lowercase ,default=lowercase )
parser.add_argument(
"""--type_path""" ,type=lowercase ,default="""test""" ,help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" ,type=lowercase ,default="""summarization""" ,help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" ,type=lowercase ,default=8 ,required=lowercase ,help="""batch size""" )
parser.add_argument(
"""--local_rank""" ,type=lowercase ,default=-1 ,required=lowercase ,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" ,type=lowercase ,default=lowercase ,required=lowercase ,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" ,type=lowercase ,default=1 ,required=lowercase ,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" ,type=lowercase ,default=600 ,required=lowercase ,help="""How long should master process wait for other processes to finish.""" ,)
parser.add_argument("""--src_lang""" ,type=lowercase ,default=lowercase ,required=lowercase )
parser.add_argument("""--tgt_lang""" ,type=lowercase ,default=lowercase ,required=lowercase )
parser.add_argument(
"""--prefix""" ,type=lowercase ,required=lowercase ,default=lowercase ,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" ,action="""store_true""" )
parser.add_argument("""--debug""" ,action="""store_true""" )
snake_case : Optional[Any] = time.time()
snake_case , snake_case : Optional[Any] = parser.parse_known_args()
snake_case : int = parse_numeric_n_bool_cl_kwargs(lowercase )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
snake_case : Tuple = Path(args.save_dir + """_tmp""" )
Path(lowercase ).mkdir(exist_ok=lowercase ) # this handles locking.
snake_case : Any = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
snake_case : List[Any] = {}
if args.src_lang is not None:
snake_case : Dict = args.src_lang
if args.tgt_lang is not None:
snake_case : Any = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowercase )
snake_case , snake_case : Tuple = eval_data_dir(
args.data_dir ,lowercase ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=lowercase ,**lowercase ,)
if args.local_rank <= 0:
snake_case : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowercase )
snake_case : int = gather_results_from_each_node(lowercase ,lowercase ,args.sync_timeout )
snake_case : Dict = combine_partial_results(lowercase )
if args.num_return_sequences > 1:
snake_case : Union[str, Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(lowercase ,lowercase )
return
snake_case : Tuple = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(lowercase ) as f:
snake_case : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowercase )]
# Calculate metrics, save metrics, and save _generations.txt
snake_case : List[str] = """translation""" in args.task
snake_case : Tuple = calculate_bleu if calc_bleu else calculate_rouge
snake_case : int = """bleu""" if calc_bleu else """rouge"""
snake_case : Dict = score_fn(lowercase ,lowercase )
snake_case : int = len(lowercase )
snake_case : Union[str, Any] = time.time() - start_time
snake_case : Union[str, Any] = round(runtime / metrics["""n_obs"""] ,4 )
snake_case : List[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
snake_case : List[Any] = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(lowercase ,lowercase ,indent=lowercase )
print(lowercase )
write_txt_file(lowercase ,save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(lowercase ,save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List:
snake_case : Optional[int] = []
for partial_result in partial_results:
records.extend(lowercase )
snake_case : Any = sorted(lowercase ,key=lambda lowercase : x["id"] )
snake_case : Union[str, Any] = [x["""pred"""] for x in records]
return preds
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
snake_case : List[Any] = time.time()
logger.info("""waiting for all nodes to finish""" )
snake_case : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
snake_case : str = list(save_dir.glob("""rank_*.json""" ) )
if len(lowercase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
snake_case : List[Any] = lmap(lowercase ,lowercase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
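    # Cocktail shaker sort: alternate a backward pass (moving small values left)
    # with a forward pass (moving large values right), stopping once no swap occurs.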
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
from __future__ import annotations
lowerCamelCase : List[Any] = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
lowerCamelCase : int = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list[float]:
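    # Brute force: for each element, linearly scan the rest of the array for the
    # first strictly larger value (O(n^2) overall).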
snake_case : List[str] = []
snake_case : Dict = len(lowercase )
for i in range(lowercase ):
snake_case : float = -1
for j in range(i + 1 ,lowercase ):
if arr[i] < arr[j]:
snake_case : List[Any] = arr[j]
break
result.append(lowercase )
return result
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list[float]:
snake_case : Union[str, Any] = []
for i, outer in enumerate(lowercase ):
snake_case : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
snake_case : Tuple = inner
break
result.append(lowercase )
return result
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list[float]:
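    # Monotonic-stack variant: scan from the right, popping values that can no
    # longer be a "next greater" answer, so each element is pushed and popped at most once.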
snake_case : Optional[Any] = len(lowercase )
snake_case : list[float] = []
snake_case : list[float] = [-1] * arr_size
for index in reversed(range(lowercase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
snake_case : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCamelCase : List[str] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 684 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2 the vocabulary had n_vocab=80; in v3 the "+" character was missed, so n_vocab=79 characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
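# A hedged usage sketch for the tokenizer above: the checkpoint name is an
# assumption (any Jukebox checkpoint shipping artists/genres/lyrics vocab
# files would do), but the call shape matches __call__ above, which returns a
# BatchEncoding with one input_ids tensor per configured model version.
if __name__ == "__main__":
    from transformers import JukeboxTokenizer

    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
    print(len(encoding["input_ids"]))  # one entry per version (v3, v2, v2)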
| 684 | 1 |
import requests
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
snake_case : Any = {"""Content-Type""": """application/json"""}
snake_case : Optional[int] = requests.post(SCREAMING_SNAKE_CASE_ ,json={"""text""": message_body} ,headers=SCREAMING_SNAKE_CASE_ )
if response.status_code != 200:
snake_case : List[str] = (
"""Request to slack returned an error """
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 700 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
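    # Cross-check the naive scan against str.find (a sketch: collects every
    # occurrence, stepping one character so overlapping matches are kept too).
    def find_all_with_builtin(text: str, pattern: str) -> list:
        positions, start = [], text.find(pattern)
        while start != -1:
            positions.append(start)
            start = text.find(pattern, start + 1)
        return positions

    assert find_all_with_builtin('ABAAABCDBBABCDDEBCABC', 'ABC') == [4, 10, 18]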
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
if number > 0:
raise ValueError("""input must be a negative integer""" )
snake_case : List[str] = len(bin(__A )[3:] )
snake_case : Optional[Any] = bin(abs(__A ) - (1 << binary_number_length) )[3:]
snake_case : int = (
(
"""1"""
+ """0""" * (binary_number_length - len(__A ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
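    # Worked example of the arithmetic above (a sketch using the names from
    # the function body): for number = -5, bin(-5)[3:] == "101" gives
    # binary_number_length == 3; abs(-5) - (1 << 3) == -3 and bin(-3)[3:] ==
    # "11"; the result is "1" + "0" * (3 - 2) + "11" == "1011", i.e. "0b1011",
    # the 4-bit two's complement representation of -5.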
| 701 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
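    # Quick numerical check (a sketch): 2 / (1 + e^(-2x)) - 1 is the logistic
    # form of tanh, so it agrees with numpy's builtin up to floating error.
    xs = np.linspace(-5.0, 5.0, 11)
    assert np.allclose((2 / (1 + np.exp(-2 * xs))) - 1, np.tanh(xs))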
| 684 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
snake_case : Tuple = tempfile.mkdtemp()
snake_case : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
snake_case : int = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
snake_case : str = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCAmelCase ( self , **A ) -> Dict:
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase ( self , **A ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase ( self , **A ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case : Any = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : str = self.get_tokenizer()
snake_case : Optional[int] = self.get_rust_tokenizer()
snake_case : int = self.get_image_processor()
snake_case : int = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case : int = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
snake_case : List[Any] = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case : Optional[int] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
snake_case : str = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case : Any = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
snake_case : str = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : str = self.get_image_processor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : str = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
snake_case : str = self.prepare_image_inputs()
snake_case : List[Any] = image_processor(UpperCAmelCase_ , return_tensors="""np""" )
snake_case : str = processor(images=UpperCAmelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[Any] = self.get_image_processor()
snake_case : Optional[int] = self.get_tokenizer()
snake_case : List[Any] = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
snake_case : Dict = """lower newer"""
snake_case : Any = processor(text=UpperCAmelCase_ )
snake_case : Dict = tokenizer(UpperCAmelCase_ , padding="""max_length""" , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : List[str] = self.get_image_processor()
snake_case : str = self.get_tokenizer()
snake_case : Union[str, Any] = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
snake_case : str = """lower newer"""
snake_case : Tuple = self.prepare_image_inputs()
snake_case : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[Any] = self.get_image_processor()
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Optional[int] = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
snake_case : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : Tuple = processor.batch_decode(UpperCAmelCase_ )
snake_case : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Optional[Any] = self.get_image_processor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : List[str] = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
snake_case : Tuple = """lower newer"""
snake_case : str = self.prepare_image_inputs()
snake_case : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 0 |
class __lowercase :
"""simple docstring"""
def __init__( self , A , A , A ) -> Union[str, Any]:
snake_case : Any = None
snake_case : List[str] = None
snake_case : str = graph
self._normalize_graph(A_ , A_ )
snake_case : Any = len(A_ )
snake_case : Optional[int] = None
def UpperCAmelCase ( self , A , A ) -> str:
        if isinstance(sources , int ):
            snake_case : List[str] = [sources]
        if isinstance(sinks , int ):
            snake_case : List[Any] = [sinks]
if len(A_ ) == 0 or len(A_ ) == 0:
return
snake_case : Optional[int] = sources[0]
snake_case : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(A_ ) > 1 or len(A_ ) > 1:
snake_case : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
snake_case : Tuple = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
snake_case : Any = max_input_flow
snake_case : Any = 0
snake_case : List[str] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
snake_case : Dict = max_input_flow
snake_case : Tuple = size - 1
def UpperCAmelCase ( self ) -> Dict:
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : Union[str, Any] = algorithm(self )
class __lowercase :
"""simple docstring"""
def __init__( self , A ) -> List[Any]:
snake_case : List[Any] = flow_network
snake_case : Optional[int] = flow_network.verticesCount
snake_case : Optional[Any] = flow_network.sourceIndex
snake_case : Tuple = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
snake_case : List[str] = flow_network.graph
snake_case : Tuple = False
def UpperCAmelCase ( self ) -> Dict:
if not self.executed:
self._algorithm()
snake_case : str = True
def UpperCAmelCase ( self ) -> Dict:
pass
class __lowercase (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , A ) -> List[Any]:
super().__init__(A_ )
# use this to save your result
snake_case : str = -1
def UpperCAmelCase ( self ) -> Optional[int]:
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class __lowercase (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , A ) -> List[str]:
super().__init__(A_ )
snake_case : str = [[0] * self.verticies_count for i in range(self.verticies_count )]
snake_case : int = [0] * self.verticies_count
snake_case : Optional[int] = [0] * self.verticies_count
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
snake_case : str = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
snake_case : Any = 0
while i < len(A_ ):
snake_case : Union[str, Any] = vertices_list[i]
snake_case : int = self.heights[vertex_index]
self.process_vertex(A_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(A_ ) )
snake_case : Any = 0
else:
i += 1
snake_case : Tuple = sum(self.preflow[self.source_index] )
def UpperCAmelCase ( self , A ) -> List[Any]:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(A_ , A_ )
self.relabel(A_ )
def UpperCAmelCase ( self , A , A ) -> Dict:
snake_case : int = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
snake_case : str = self.heights[to_index]
if min_height is not None:
snake_case : str = min_height + 1
if __name__ == "__main__":
lowerCamelCase : str = [0]
lowerCamelCase : List[str] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCamelCase : Optional[Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCamelCase : Any = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCamelCase : str = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 703 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCamelCase : List[str] = logging.getLogger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A=None ) -> Optional[int]:
super().__init__(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , )
snake_case : Optional[int] = None
def UpperCAmelCase ( self , A ) -> Any:
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
snake_case : int = self._infer_socket_ifname()
# avoid clash with the NCCL port
snake_case : Union[str, Any] = str(distributed_port + 1 )
snake_case : Optional[int] = dist.new_group(ranks=UpperCamelCase_ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCAmelCase ( self ) -> Dict:
return dist.get_rank(group=self.process_group ) == 0
def UpperCAmelCase ( self , A , A , A=torch.floataa ) -> Tuple:
snake_case : Union[str, Any] = torch.empty(UpperCamelCase_ , dtype=UpperCamelCase_ )
dist.scatter(UpperCamelCase_ , src=0 , scatter_list=UpperCamelCase_ , group=self.process_group )
return target_tensor
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Union[str, Any] = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
snake_case : List[str] = next((addr for addr in addrs if addr.startswith("""e""" )) , UpperCamelCase_ )
return ifname
def UpperCAmelCase ( self , A , A ) -> Tuple[np.ndarray, List[dict]]:
if not dist.is_initialized():
snake_case : Optional[Any] = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ )
# distributed training
snake_case : int = dist.get_world_size(group=self.process_group )
# gather logic
snake_case : Any = None
if self._is_main():
snake_case : Optional[int] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCamelCase_ )]
dist.gather(torch.tensor(UpperCamelCase_ ) , dst=0 , gather_list=UpperCamelCase_ , group=self.process_group )
# scatter logic
snake_case : Optional[Any] = question_hidden_states.shape[0]
snake_case : Optional[Any] = []
snake_case : int = []
if self._is_main():
assert len(UpperCamelCase_ ) == world_size
snake_case : Any = self._main_retrieve(torch.cat(UpperCamelCase_ ).numpy() , UpperCamelCase_ )
snake_case : int = torch.tensor(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
snake_case : str = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ )
snake_case : int = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ )
snake_case : List[Any] = self._scattered(UpperCamelCase_ , [n_queries, n_docs] , target_type=torch.intaa )
snake_case : List[Any] = self._scattered(UpperCamelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCamelCase_ )
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
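# A hedged usage sketch: the class above corresponds to the tokenizer
# exported by transformers as SpeechT5Tokenizer, and the checkpoint is one of
# the entries in PRETRAINED_VOCAB_FILES_MAP. The exact ids depend on the
# downloaded SentencePiece model.
if __name__ == "__main__":
    from transformers import SpeechT5Tokenizer

    tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    ids = tokenizer("Hello, world!").input_ids
    print(tokenizer.convert_ids_to_tokens(ids))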
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase = 3 ,lowercase = 7 ,lowercase = 1000000 ) -> Union[str, Any]:
snake_case : List[str] = 0
snake_case : List[str] = 1
for current_denominator in range(1 ,limit + 1 ):
snake_case : Optional[Any] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
snake_case : str = current_numerator
snake_case : Tuple = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
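    # Brute-force cross-check on a small limit with fractions.Fraction
    # (a sketch; for each denominator d, (3 * d - 1) // 7 is the largest
    # numerator n with n / d strictly below 3 / 7):
    from fractions import Fraction

    def brute_force_left_neighbour(limit: int) -> int:
        best = Fraction(0, 1)
        for d in range(1, limit + 1):
            n = (3 * d - 1) // 7
            if Fraction(n, d) > best:
                best = Fraction(n, d)
        return best.numerator

    assert brute_force_left_neighbour(8) == solution(3, 7, 8) == 2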
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> tuple[float, float]:
# Check if the input is valid
if not len(lowerCamelCase_ ) == len(lowerCamelCase_ ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can\'t be zero.""" )
# Extract the coefficients
snake_case : str = equationa
snake_case : List[Any] = equationa
# Calculate the determinants of the matrices
snake_case : Optional[int] = aa * ba - aa * ba
snake_case : Union[str, Any] = ca * ba - ca * ba
snake_case : Union[str, Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution: both numerator determinants are zero, so x = y = 0
return (0.0, 0.0)
else:
snake_case : Tuple = determinant_x / determinant
snake_case : Optional[int] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
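# Worked example (a sketch of the arithmetic above) for the system
#   2x + 3y = 8
#   4x - 1y = 2
# i.e. equations (2, 3, 8) and (4, -1, 2): the determinants are
# D = 2 * (-1) - 4 * 3 = -14, Dx = 8 * (-1) - 2 * 3 = -14 and
# Dy = 2 * 2 - 4 * 8 = -28, giving x = Dx / D = 1 and y = Dy / D = 2.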
| 706 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
class __lowercase :
"""simple docstring"""
def __init__( self , A ) -> Union[str, Any]:
snake_case : Dict = n
snake_case : Dict = [None] * self.n
snake_case : Tuple = 0 # index of the first element
snake_case : List[Any] = 0
snake_case : List[Any] = 0
def __len__( self ) -> int:
return self.size
def UpperCAmelCase ( self ) -> bool:
return self.size == 0
def UpperCAmelCase ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def UpperCAmelCase ( self , A ) -> Any:
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
snake_case : Tuple = data
snake_case : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def UpperCAmelCase ( self ) -> Any:
if self.size == 0:
raise Exception("""UNDERFLOW""" )
snake_case : Dict = self.array[self.front]
snake_case : Tuple = None
snake_case : int = (self.front + 1) % self.n
self.size -= 1
return temp
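# Usage sketch exercising the wrap-around (the class name CircularQueue and
# the method names enqueue / dequeue / is_empty are assumptions matching the
# standard circular-queue implementation this mirrors):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)   # enqueue returns self, so it chains
    assert queue.dequeue() == 1              # front advances to index 1
    queue.enqueue(4)                         # rear wraps around to index 0
    assert [queue.dequeue() for _ in range(3)] == [2, 3, 4]
    assert queue.is_empty()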
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
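# A minimal PIL/numpy sketch of the resize -> center-crop -> rescale ->
# normalize pipeline implemented above, using the defaults from __init__
# (256x256 resize, 224x224 crop, 1/255 rescale, mean/std of 0.5). This is an
# independent illustration, not the transformers helper functions themselves.
if __name__ == "__main__":
    from PIL import Image

    image = Image.new("RGB", (320, 240), color=(128, 64, 32))
    image = image.resize((256, 256), resample=Image.BICUBIC)
    left = top = (256 - 224) // 2
    image = image.crop((left, top, left + 224, top + 224))
    pixels = np.asarray(image).astype(np.float32) / 255.0  # rescale
    pixels = (pixels - 0.5) / 0.5                          # normalize
    pixels = pixels.transpose(2, 0, 1)                     # HWC -> CHW (ChannelDimension.FIRST)
    print(pixels.shape)  # (3, 224, 224)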
| 684 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[int] = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
snake_case : Optional[int] = load_dataset("""ashraq/esc50""" )
snake_case : int = dataset['''train''']['''audio'''][-1]['''array''']
snake_case : Union[str, Any] = audio_classifier(A , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(A ) , [{"""score""": 0.5_01, """label""": """Sound of a dog"""}, {"""score""": 0.4_99, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def UpperCAmelCase ( self ) -> Any:
pass
@slow
@require_torch
def UpperCAmelCase ( self ) -> Any:
snake_case : str = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
snake_case : Tuple = load_dataset("""ashraq/esc50""" )
snake_case : int = dataset['''train''']['''audio'''][-1]['''array''']
snake_case : Optional[Any] = audio_classifier(A , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(A ) , [
{"""score""": 0.9_99, """label""": """Sound of a dog"""},
{"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
] , )
snake_case : Union[str, Any] = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(A ) , [
[
{"""score""": 0.9_99, """label""": """Sound of a dog"""},
{"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
snake_case : Union[str, Any] = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{"""score""": 0.9_99, """label""": """Sound of a dog"""},
{"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def UpperCAmelCase ( self ) -> Any:
pass
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
snake_case : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """tf_padding""" ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """depth_multiplier""" ) )
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=3 , A=3_2 , A=0.25 , A=8 , A=8 , A=6 , A=3_2 , A=True , A=True , A=True , A="relu6" , A=1_2_8_0 , A=0.1 , A=0.02 , A=True , A=True , A=1_0 , A=None , ) -> List[Any]:
snake_case : Optional[int] = parent
snake_case : Any = batch_size
snake_case : Any = num_channels
snake_case : Dict = image_size
snake_case : Dict = depth_multiplier
snake_case : List[str] = depth_divisible_by
snake_case : int = min_depth
snake_case : Union[str, Any] = expand_ratio
snake_case : Union[str, Any] = tf_padding
snake_case : str = output_stride
snake_case : Dict = first_layer_is_expansion
snake_case : List[Any] = finegrained_output
snake_case : Tuple = hidden_act
snake_case : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
snake_case : Tuple = classifier_dropout_prob
snake_case : Tuple = use_labels
snake_case : Tuple = is_training
snake_case : Any = num_labels
snake_case : int = initializer_range
snake_case : Any = scope
def UpperCAmelCase ( self ) -> Any:
snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Optional[int] = None
snake_case : List[Any] = None
if self.use_labels:
snake_case : str = ids_tensor([self.batch_size] , self.num_labels )
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ) -> Tuple:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , A , A , A , A ) -> List[str]:
snake_case : Any = MobileNetVaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case : Any = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def UpperCAmelCase ( self , A , A , A , A ) -> Dict:
snake_case : Union[str, Any] = self.num_labels
snake_case : str = MobileNetVaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A ) -> List[Any]:
snake_case : Dict = self.num_labels
snake_case : int = MobileNetVaForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase ( self ) -> Any:
snake_case : int = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_snake_case = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[Any] = MobileNetVaModelTester(self )
snake_case : List[str] = MobileNetVaConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def UpperCAmelCase ( self ) -> Any:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Tuple = model_class(_lowerCAmelCase )
snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : List[Any] = [*signature.parameters.keys()]
snake_case : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(A , A , A ):
snake_case : Optional[Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case : Union[str, Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
snake_case : Tuple = outputs.hidden_states
snake_case : List[Any] = 1_6
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : List[str] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Optional[int] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[Any] = MobileNetVaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Optional[Any]:
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : int = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(_lowerCAmelCase )
snake_case : Optional[int] = self.default_image_processor
snake_case : Optional[Any] = prepare_img()
snake_case : Union[str, Any] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
snake_case : int = model(**_lowerCAmelCase )
# verify the logits
snake_case : Optional[int] = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
snake_case : int = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def UpperCAmelCase ( self ) -> List[str]:
snake_case : str = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
snake_case : List[str] = model.to(_lowerCAmelCase )
snake_case : Dict = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
snake_case : Any = prepare_img()
snake_case : int = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
snake_case : str = model(**_lowerCAmelCase )
snake_case : List[Any] = outputs.logits
# verify the logits
snake_case : List[Any] = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , _lowerCAmelCase )
snake_case : Optional[Any] = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def context_fr ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class DummyModel (BertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class DummyModel (TFBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
class DummyModel (FlaxBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , [] )
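# Added note (not in the original file): find_labels inspects the signature of the
# model's forward/call method, which is why a subclass that adds nothing
# (DummyModel) reports exactly the same label arguments as its parent.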
| 684 | 0 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9 # units = N * m^2 * C^-2
def SCREAMING_SNAKE_CASE__ ( force ,chargea ,chargeb ,distance ) -> dict[str, float]:
charge_product = abs(chargea * chargeb )
if (force, chargea, chargeb, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
force = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
chargea = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
return {"charge1": chargea}
elif chargeb == 0:
chargeb = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargeb}
elif distance == 0:
distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
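# Added worked example (not in the original file): pass 0 for exactly the unknown
# quantity. With chargea=3 C, chargeb=5 C and distance=2000 m:
#   8.988e9 * |3 * 5| / 2000**2 == 33705.0
# SCREAMING_SNAKE_CASE__(0, 3, 5, 2000)  # -> {'force': 33705.0}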
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
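# Added note (not in the original file): with the defaults above (patch_size=16,
# frequency_stride=10, time_stride=10, num_mel_bins=128, max_length=1024) the
# model extracts 12 frequency x 101 time = 1212 patches, matching the
# AudioSet-finetuned checkpoint referenced in the config map.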
| 684 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
lowerCamelCase : Optional[Any] = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
lowerCamelCase : Optional[Any] = {
'vinai/phobert-base': 2_5_6,
'vinai/phobert-large': 2_5_6,
}
def get_pairs ( word ) -> Union[str, Any]:
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return set(pairs )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> Dict:
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , **__A , )
snake_case : str = vocab_file
snake_case : str = merges_file
snake_case : List[str] = {}
snake_case : Any = 0
snake_case : Optional[int] = 1
snake_case : List[Any] = 2
snake_case : Optional[int] = 3
self.add_from_file(__A )
snake_case : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(__A , encoding="""utf-8""" ) as merges_handle:
snake_case : Optional[int] = merges_handle.read().split("""\n""" )[:-1]
snake_case : Dict = [tuple(merge.split()[:-1] ) for merge in merges]
snake_case : Dict = dict(zip(__A , range(len(__A ) ) ) )
snake_case : Optional[int] = {}
def UpperCAmelCase ( self , A , A = None ) -> Optional[Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case : Dict = [self.cls_token_id]
snake_case : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , A , A = None , A = False ) -> str:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def UpperCAmelCase ( self , A , A = None ) -> Union[str, Any]:
snake_case : List[Any] = [self.sep_token_id]
snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self ) -> List[str]:
return len(self.encoder )
def UpperCAmelCase ( self ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , A ) -> List[Any]:
if token in self.cache:
return self.cache[token]
snake_case : Union[str, Any] = tuple(__A )
snake_case : str = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
snake_case : Tuple = get_pairs(__A )
if not pairs:
return token
while True:
snake_case : int = min(__A , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case : Optional[Any] = bigram
snake_case : List[Any] = []
snake_case : List[str] = 0
while i < len(__A ):
try:
snake_case : List[Any] = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case : List[Any] = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case : Any = tuple(__A )
snake_case : Any = new_word
if len(__A ) == 1:
break
else:
snake_case : Dict = get_pairs(__A )
snake_case : Tuple = "@@ ".join(__A )
snake_case : Tuple = word[:-4]
snake_case : Optional[int] = word
return word
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : int = []
snake_case : Any = re.findall(r"""\S+\n?""" , __A )
for token in words:
split_tokens.extend(list(self.bpe(__A ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , A ) -> Any:
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , A ) -> Optional[int]:
return self.decoder.get(__A , self.unk_token )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
snake_case : Dict = " ".join(__A ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , A , A = None ) -> Tuple:
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : List[str] = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case : Union[str, Any] = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
if os.path.abspath(self.merges_file ) != os.path.abspath(__A ):
copyfile(self.merges_file , __A )
return out_vocab_file, out_merge_file
def UpperCAmelCase ( self , A ) -> Union[str, Any]:
if isinstance(__A , __A ):
try:
with open(__A , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(__A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
snake_case : Optional[int] = f.readlines()
for lineTmp in lines:
snake_case : List[str] = lineTmp.strip()
snake_case : int = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
word = line[:idx]
self.encoder[word] = len(self.encoder )
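# Added note (not in the original file): bpe() above greedily applies the
# lowest-ranked merge from bpe.codes until none apply, joins the pieces with
# "@@ ", and strips the trailing "</w>" marker, e.g. producing "un@@ aff@@ able"
# style subword output (example segmentation is hypothetical, merge-dependent).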
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
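# Added usage sketch (not in the original file): the subclasses above back the
# task-level helpers, e.g.
# from transformers import pipeline
# summarizer = pipeline("summarization")
# summarizer("A very long article ...", min_length=5, max_length=20)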
| 684 | 0 |
'''simple docstring'''
def least_divisible_repunit ( divisor ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
repunit = 1
repunit_index = 1
while repunit:
repunit = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def solution ( limit = 1000000 ) -> int:
divisor = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(divisor ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
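# Added worked example (not in the original file): least_divisible_repunit(7) == 6,
# since R(6) = 111111 = 7 * 15873 is the shortest repunit divisible by 7.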
| 712 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines ( lines ) -> str:
filtered_lines = []
for line in lines:
line = re.sub(R"""#.*""" ,"""""" ,line ) # remove comments
if line:
filtered_lines.append(line )
full_str = """\n""".join(filtered_lines )
# Make a hash from all this code
full_bytes = full_str.encode("""utf-8""" )
return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
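# Added illustration (not in the original file): module inference from a file
# extension, e.g. _EXTENSION_TO_MODULE[".tsv"] == ("csv", {"sep": "\t"}), so a
# .tsv data file is read by the csv builder with a tab separator.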
| 684 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowercase (_snake_case ):
"""simple docstring"""
def __init__( self , A , A = None , A = None , A = None , A = False , A = False , A = None , A = None , **A , ) -> str:
super().__init__(
A , split=A , features=A , cache_dir=A , keep_in_memory=A , streaming=A , num_proc=A , **A , )
snake_case : Optional[int] = field
snake_case : Optional[int] = path_or_paths if isinstance(A , A ) else {self.split: path_or_paths}
snake_case : int = Json(
cache_dir=A , data_files=A , features=A , field=A , **A , )
def UpperCAmelCase ( self ) -> int:
if self.streaming:
snake_case : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
snake_case : Dict = None
snake_case : List[str] = None
snake_case : Tuple = None
snake_case : Any = None
self.builder.download_and_prepare(
download_config=A , download_mode=A , verification_mode=A , base_path=A , num_proc=self.num_proc , )
snake_case : Tuple = self.builder.as_dataset(
split=self.split , verification_mode=A , in_memory=self.keep_in_memory )
return dataset
class __lowercase :
"""simple docstring"""
def __init__( self , A , A , A = None , A = None , **A , ) -> Optional[int]:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
snake_case : Union[str, Any] = dataset
snake_case : Optional[Any] = path_or_buf
snake_case : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
snake_case : List[Any] = num_proc
snake_case : int = """utf-8"""
snake_case : Dict = to_json_kwargs
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[Any] = self.to_json_kwargs.pop("""path_or_buf""" , A )
snake_case : Any = self.to_json_kwargs.pop("""orient""" , """records""" )
snake_case : Dict = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
snake_case : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
snake_case : List[Any] = self.to_json_kwargs.pop("""compression""" , A )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=A ) as buffer:
snake_case : Any = self._write(file_obj=A , orient=A , lines=A , index=A , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
""" was passed. Please provide a local path instead.""" )
snake_case : Optional[Any] = self._write(
file_obj=self.path_or_buf , orient=A , lines=A , index=A , **self.to_json_kwargs )
return written
def UpperCAmelCase ( self , A ) -> Union[str, Any]:
snake_case , snake_case , snake_case , snake_case , snake_case : Optional[int] = args
snake_case : Optional[Any] = query_table(
table=self.dataset.data , key=slice(A , offset + self.batch_size ) , indices=self.dataset._indices , )
snake_case : Any = batch.to_pandas().to_json(
path_or_buf=A , orient=A , lines=A , index=A , **A )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase ( self , A , A , A , A , **A , ) -> Tuple:
snake_case : List[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
snake_case : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(A )
else:
snake_case , snake_case : Optional[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , A , A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(A )
return written
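# Added usage sketch (not in the original file): the writer above powers
# Dataset.to_json, which defaults to JSON Lines (orient="records", lines=True):
# from datasets import Dataset
# Dataset.from_dict({"a": [1, 2]}).to_json("out.jsonl")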
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch ( tf_checkpoint_path ,rembert_config_file ,pytorch_dump_path ):
# Initialise PyTorch model
config = RemBertConfig.from_json_file(rembert_config_file )
print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
model = RemBertModel(config )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(model ,config ,tf_checkpoint_path )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
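# Added usage sketch (not in the original script); the paths are hypothetical:
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./rembert/model.ckpt \
#   --rembert_config_file ./rembert/config.json \
#   --pytorch_dump_path ./rembert/pytorch_model.bin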
| 684 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , **A ) -> Dict:
super().__init__(**_UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , """vision""" )
self.check_model_type(_UpperCAmelCase )
def __call__( self , A , A = None , **A , ) -> Tuple:
if "text_queries" in kwargs:
snake_case : Tuple = kwargs.pop("""text_queries""" )
if isinstance(_UpperCAmelCase , (str, Image.Image) ):
snake_case : Dict = {"""image""": image, """candidate_labels""": candidate_labels}
else:
snake_case : Tuple = image
snake_case : Dict = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
return results
def UpperCAmelCase ( self , **A ) -> Union[str, Any]:
snake_case : Optional[int] = {}
if "threshold" in kwargs:
snake_case : List[str] = kwargs["""threshold"""]
if "top_k" in kwargs:
snake_case : List[Any] = kwargs["""top_k"""]
return {}, {}, postprocess_params
def UpperCAmelCase ( self , A ) -> List[str]:
snake_case : Optional[int] = load_image(inputs["""image"""] )
snake_case : str = inputs["""candidate_labels"""]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
snake_case : List[str] = candidate_labels.split(""",""" )
snake_case : List[str] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_UpperCAmelCase ):
snake_case : Dict = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
snake_case : Any = self.image_processor(_UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(_UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCAmelCase ( self , A ) -> Union[str, Any]:
snake_case : Union[str, Any] = model_inputs.pop("""target_size""" )
snake_case : Optional[int] = model_inputs.pop("""candidate_label""" )
snake_case : List[Any] = model_inputs.pop("""is_last""" )
snake_case : Dict = self.model(**_UpperCAmelCase )
snake_case : int = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def UpperCAmelCase ( self , A , A=0.1 , A=None ) -> Union[str, Any]:
snake_case : int = []
for model_output in model_outputs:
snake_case : Union[str, Any] = model_output["""candidate_label"""]
snake_case : Dict = BaseModelOutput(_UpperCAmelCase )
snake_case : Any = self.image_processor.post_process_object_detection(
outputs=_UpperCAmelCase , threshold=_UpperCAmelCase , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
snake_case : Tuple = outputs["""scores"""][index].item()
snake_case : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0] )
snake_case : Union[str, Any] = {"""score""": score, """label""": label, """box""": box}
results.append(_UpperCAmelCase )
snake_case : Any = sorted(_UpperCAmelCase , key=lambda A : x["score"] , reverse=_UpperCAmelCase )
if top_k:
snake_case : Union[str, Any] = results[:top_k]
return results
def UpperCAmelCase ( self , A ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
snake_case , snake_case , snake_case , snake_case : Tuple = box.int().tolist()
snake_case : List[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
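# Added usage sketch (not in the original file):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("image.png", candidate_labels=["cat", "remote control"])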
| 714 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = None
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = None
_snake_case = None
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = True
_snake_case = None
_snake_case = 1
_snake_case = None
_snake_case = False
_snake_case = None
_snake_case = None
def UpperCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def primitive_root ( p_val ) -> int:
print("""Generating primitive root of p""" )
while True:
g = random.randrange(3 ,p_val )
if pow(g ,2 ,p_val ) == 1:
continue
if pow(g ,p_val ,p_val ) == 1:
continue
return g
def generate_key ( key_size ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
p = rabin_miller.generate_large_prime(key_size ) # select large prime number.
e_a = primitive_root(p ) # one primitive root on modulo p.
d = random.randrange(3 ,p ) # private_key -> have to be greater than 2 for safety.
e_b = cryptomath.find_mod_inverse(pow(e_a ,d ,p ) ,p )
public_key = (key_size, e_a, e_b, p)
private_key = (key_size, d)
return public_key, private_key
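# Added note (not in the original file): the public tuple packs
# (key_size, g, (g**d)**-1 mod p, p) -- e_b above is the modular inverse of
# pow(e_a, d, p) -- while only the exponent d stays private.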
def make_key_files ( name ,key_size ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
public_key , private_key = generate_key(key_size )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def main ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase (a__ ):
"""simple docstring"""
_snake_case = """trocr"""
_snake_case = ["""past_key_values"""]
_snake_case = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self , A=5_0_2_6_5 , A=1_0_2_4 , A=1_2 , A=1_6 , A=4_0_9_6 , A="gelu" , A=5_1_2 , A=0.1 , A=0.0 , A=0.0 , A=2 , A=0.02 , A=0.0 , A=True , A=False , A=True , A=True , A=1 , A=0 , A=2 , **A , ) -> List[Any]:
snake_case : Union[str, Any] = vocab_size
snake_case : Tuple = d_model
snake_case : List[str] = decoder_layers
snake_case : int = decoder_attention_heads
snake_case : Optional[int] = decoder_ffn_dim
snake_case : Optional[Any] = activation_function
snake_case : List[Any] = max_position_embeddings
snake_case : Tuple = dropout
snake_case : Tuple = attention_dropout
snake_case : str = activation_dropout
snake_case : int = init_std
snake_case : Dict = decoder_layerdrop
snake_case : List[str] = use_cache
snake_case : int = scale_embedding
snake_case : Optional[Any] = use_learned_position_embeddings
snake_case : List[Any] = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 716 |
def _modexpt ( base ,exponent ,modulo_value ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
x = _modexpt(base ,exponent // 2 ,modulo_value ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(base ,exponent - 1 ,modulo_value )) % modulo_value
def solution ( base = 1777 ,height = 1855 ,digits = 8 ) -> int:
result = base
for _ in range(1 ,height ):
result = _modexpt(base ,result ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
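# Added note (not in the original file): solution() returns the last `digits`
# digits of the tetration base^^height, e.g. solution(3, 2, 8) == 27 because
# 3^^2 = 3**3 = 27.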
| 684 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
# TODO Update this
lowerCamelCase : Dict = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __lowercase (_A ):
"""simple docstring"""
_snake_case = """esm"""
def __init__( self , A=None , A=None , A=None , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A=0.1 , A=0.1 , A=1_0_2_6 , A=0.02 , A=1e-1_2 , A="absolute" , A=True , A=None , A=False , A=False , A=None , A=None , **A , ) -> Any:
super().__init__(pad_token_id=__lowerCamelCase , mask_token_id=__lowerCamelCase , **__lowerCamelCase )
snake_case : str = vocab_size
snake_case : Optional[Any] = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Tuple = intermediate_size
snake_case : List[Any] = hidden_dropout_prob
snake_case : List[str] = attention_probs_dropout_prob
snake_case : List[Any] = max_position_embeddings
snake_case : Any = initializer_range
snake_case : Tuple = layer_norm_eps
snake_case : Tuple = position_embedding_type
snake_case : Dict = use_cache
snake_case : Optional[Any] = emb_layer_norm_before
snake_case : Tuple = token_dropout
snake_case : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
snake_case : Dict = EsmFoldConfig()
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
snake_case : List[str] = EsmFoldConfig(**__lowerCamelCase )
snake_case : str = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
snake_case : str = get_default_vocab_list()
else:
snake_case : List[str] = vocab_list
else:
snake_case : Tuple = None
snake_case : Union[str, Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , __lowerCamelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase ( self ) -> str:
snake_case : Tuple = super().to_dict()
if isinstance(self.esmfold_config , __lowerCamelCase ):
snake_case : int = self.esmfold_config.to_dict()
return output
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = None
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = 0
_snake_case = True
_snake_case = False
_snake_case = 128
_snake_case = None
def UpperCAmelCase ( self ) -> Optional[Any]:
if self.trunk is None:
snake_case : Dict = TrunkConfig()
elif isinstance(self.trunk , __lowerCamelCase ):
snake_case : str = TrunkConfig(**self.trunk )
def UpperCAmelCase ( self ) -> str:
snake_case : int = asdict(self )
snake_case : Dict = self.trunk.to_dict()
return output
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = 48
_snake_case = 1_024
_snake_case = 128
_snake_case = 32
_snake_case = 32
_snake_case = 32
_snake_case = 0
_snake_case = 0
_snake_case = False
_snake_case = 4
_snake_case = 128
_snake_case = None
def UpperCAmelCase ( self ) -> Union[str, Any]:
if self.structure_module is None:
snake_case : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , __lowerCamelCase ):
snake_case : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
snake_case : List[str] = self.sequence_state_dim // self.sequence_head_width
snake_case : Optional[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCAmelCase ( self ) -> int:
snake_case : Any = asdict(self )
snake_case : int = self.structure_module.to_dict()
return output
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = 384
_snake_case = 128
_snake_case = 16
_snake_case = 128
_snake_case = 12
_snake_case = 4
_snake_case = 8
_snake_case = 0.1
_snake_case = 8
_snake_case = 1
_snake_case = 2
_snake_case = 7
_snake_case = 10
_snake_case = 1E-8
_snake_case = 1E5
def UpperCAmelCase ( self ) -> str:
return asdict(self )
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
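# Added usage sketch (not in the original file); upstream this config class is
# EsmConfig:
# cfg = EsmConfig(vocab_size=33)   # plain (non-folding) ESM-2 style config
# cfg.to_dict()["vocab_size"]      # -> 33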
| 717 |
from itertools import product
def total_frequency_distribution ( sides_number ,dice_number ) -> list[int]:
max_face_number = sides_number
max_total = max_face_number * dice_number
totals_frequencies = [0] * (max_total + 1)
min_face_number = 1
face_numbers = range(min_face_number ,max_face_number + 1 )
for dice_numbers in product(face_numbers ,repeat=dice_number ):
total = sum(dice_numbers )
totals_frequencies[total] += 1
return totals_frequencies
def solution ( ) -> float:
peter_totals_frequencies = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
colin_totals_frequencies = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
peter_wins_count = 0
min_peter_total = 9
max_peter_total = 4 * 9
min_colin_total = 6
for peter_total in range(min_peter_total ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
total_games_number = (4**9) * (6**6)
peter_win_probability = peter_wins_count / total_games_number
rounded_peter_win_probability = round(peter_win_probability ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 2_6_4.1_7_2),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 3_5.3_1_4_7),
"cup": from_to(0.0_0023_6588, 4_2_2_6.7_5),
}
def SCREAMING_SNAKE_CASE__ ( value ,from_type ,to_type ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ """, """.join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ """, """.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
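# Added worked example (not in the original file): converting 4 cubic metres to
# litres multiplies by cubicmeter's from_ factor (1) and litre's to factor (1000):
# SCREAMING_SNAKE_CASE__(4, "cubicmeter", "litre")  # -> 4000.0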
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCamelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , A , A ) -> Optional[int]:
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , A = 1 , A = 1_0_0 , A = None , A = None , A = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
snake_case : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
snake_case : Optional[int] = audio_length_in_s * self.unet.config.sample_rate
snake_case : Optional[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
snake_case : Optional[Any] = int(__UpperCamelCase )
if sample_size % down_scale_factor != 0:
snake_case : Dict = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
""" process.""" )
snake_case : Any = int(__UpperCamelCase )
snake_case : Dict = next(iter(self.unet.parameters() ) ).dtype
snake_case : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
snake_case : Optional[Any] = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
# set step values
self.scheduler.set_timesteps(__UpperCamelCase , device=audio.device )
snake_case : List[Any] = self.scheduler.timesteps.to(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case : Any = self.unet(__UpperCamelCase , __UpperCamelCase ).sample
# 2. compute previous image: x_t -> t_t-1
snake_case : int = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
snake_case : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
snake_case : Optional[int] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__UpperCamelCase )
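# Added usage sketch (not in the original file); "harmonai/maestro-150k" is one of
# the public Dance Diffusion checkpoints this unconditional audio pipeline serves:
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# audio = pipe(audio_length_in_s=4.0).audios[0]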
| 719 |
import os
def solution ( ) -> int:
with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( number ) -> bool:
if not isinstance(number ,int ):
msg = f"""Input value of [number={number}] must be an integer"""
raise TypeError(msg )
if number < 0:
return False
number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
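# Added note (not in the original file): this checks automorphic numbers -- ones
# whose square ends in the number itself. 76 -> True (76**2 == 5776), while
# 7 -> False (7**2 == 49 does not end in 7).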
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
def cocktail_shaker_sort ( unsorted ) -> list:
for i in range(len(unsorted ) - 1 ,0 ,-1 ):
swapped = False
for j in range(i ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
swapped = True
for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
swapped = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
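# Added note (not in the original file): each outer iteration makes one descending
# pass that sinks small elements left and one ascending pass that floats large
# elements right; cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5].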
| 684 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
url = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
res = requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
soup = BeautifulSoup(res.text, 'html.parser')
links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get("href")}""")
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
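# Note (added): per __call__ above, each version's input sequence is built as the
# three placeholder metadata ids [0, 0, 0], then the artist id, the padded genre
# ids, and the lyric character ids; "attention_masks" holds -inf values over the
# lyric tokens rather than the usual 0/1 mask.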
| 684 | 0 |
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
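    # Worked example (added): for n = 10 the sum of squares is 385 and the square of
    # the sum is 55**2 = 3025, so the difference is 3025 - 385 = 2640.
    assert solution(10) == 2640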
| 700 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
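    # Note (added): the naive scan tries every alignment, so the worst case is
    # O(len(s) * len(pattern)); the call above prints [4, 10, 18].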
| 684 | 0 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
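    # Follow-up sketch (added; the loading step is an assumption based on the
    # distillation recipe, not part of this script): the saved dict is meant to
    # initialize a 6-layer student, e.g.
    #     student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)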
| 701 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Elementwise tanh via its exponential identity: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
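    # Sanity checks (added): tanh(0) = 0 and tanh(x) saturates towards 1 for large x.
    assert tangent_hyperbolic(np.array([0.0]))[0] == 0.0
    assert abs(tangent_hyperbolic(np.array([10.0]))[0] - 1.0) < 1e-6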
| 684 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" ,lowercase ,)
if isinstance(lowercase ,torch.Tensor ):
return image
elif isinstance(lowercase ,PIL.Image.Image ):
snake_case : Dict = [image]
if isinstance(image[0] ,PIL.Image.Image ):
snake_case , snake_case : List[Any] = image[0].size
snake_case , snake_case : Any = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
snake_case : Dict = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
snake_case : str = np.concatenate(lowercase ,axis=0 )
snake_case : Optional[Any] = np.array(lowercase ).astype(np.floataa ) / 255.0
snake_case : Any = image.transpose(0 ,3 ,1 ,2 )
snake_case : Tuple = 2.0 * image - 1.0
snake_case : Optional[Any] = torch.from_numpy(lowercase )
elif isinstance(image[0] ,torch.Tensor ):
snake_case : str = torch.cat(lowercase ,dim=0 )
return image
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
if isinstance(lowercase ,torch.Tensor ):
return mask
elif isinstance(lowercase ,PIL.Image.Image ):
snake_case : Optional[int] = [mask]
if isinstance(mask[0] ,PIL.Image.Image ):
snake_case , snake_case : Dict = mask[0].size
snake_case , snake_case : Union[str, Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
snake_case : Union[str, Any] = [np.array(m.convert("""L""" ).resize((w, h) ,resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
snake_case : str = np.concatenate(lowercase ,axis=0 )
snake_case : Optional[Any] = mask.astype(np.floataa ) / 255.0
snake_case : Tuple = 0
snake_case : Optional[Any] = 1
snake_case : int = torch.from_numpy(lowercase )
elif isinstance(mask[0] ,torch.Tensor ):
snake_case : List[str] = torch.cat(lowercase ,dim=0 )
return mask
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
def __init__( self , A , A ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=A , scheduler=A )
@torch.no_grad()
def __call__( self , A , A , A = 2_5_0 , A = 0.0 , A = 1_0 , A = 1_0 , A = None , A = "pil" , A = True , ) -> Union[ImagePipelineOutput, Tuple]:
snake_case : Union[str, Any] = image
snake_case : str = _preprocess_image(A )
snake_case : List[str] = original_image.to(device=self.device , dtype=self.unet.dtype )
snake_case : Dict = _preprocess_mask(A )
snake_case : List[str] = mask_image.to(device=self.device , dtype=self.unet.dtype )
snake_case : str = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
snake_case : Any = original_image.shape
snake_case : List[str] = randn_tensor(A , generator=A , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(A , A , A , self.device )
snake_case : Optional[int] = eta
snake_case : Union[str, Any] = self.scheduler.timesteps[0] + 1
snake_case : int = generator[0] if isinstance(A , A ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
snake_case : List[Any] = self.unet(A , A ).sample
# compute previous image: x_t -> x_t-1
snake_case : Tuple = self.scheduler.step(A , A , A , A , A , A ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
snake_case : Dict = self.scheduler.undo_step(A , A , A )
snake_case : Union[str, Any] = t
snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Any = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
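# Usage sketch (added; the model id and call signature follow diffusers' RePaint
# example and should be treated as assumptions here, not part of this file):
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     out = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250,
#                jump_length=10, jump_n_sample=10).images[0]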
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
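# Note (added): once sys.modules[__name__] points at the _LazyModule, the heavy
# torch/TF imports listed in _import_structure are deferred until one of those
# symbols is first accessed.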
| 684 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowercase (UpperCamelCase_ ):
"""simple docstring"""
_snake_case = 42
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self , A=3 , A=3 , A=("DownEncoderBlock2D",) , A=(6_4,) , A=2 , A=3_2 , A="silu" , A=True , ) -> List[str]:
super().__init__()
snake_case : Optional[Any] = layers_per_block
snake_case : Optional[Any] = torch.nn.Convad(
__A , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case : int = None
snake_case : Tuple = nn.ModuleList([] )
# down
snake_case : str = block_out_channels[0]
for i, down_block_type in enumerate(__A ):
snake_case : List[Any] = output_channel
snake_case : Union[str, Any] = block_out_channels[i]
snake_case : int = i == len(__A ) - 1
snake_case : str = get_down_block(
__A , num_layers=self.layers_per_block , in_channels=__A , out_channels=__A , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__A , resnet_groups=__A , attention_head_dim=__A , temb_channels=__A , )
self.down_blocks.append(__A )
# mid
snake_case : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__A , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__A , temb_channels=__A , )
# out
snake_case : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__A , eps=1e-6 )
snake_case : List[str] = nn.SiLU()
snake_case : Tuple = 2 * out_channels if double_z else out_channels
snake_case : Tuple = nn.Convad(block_out_channels[-1] , __A , 3 , padding=1 )
snake_case : Any = False
def UpperCAmelCase ( self , A ) -> Any:
snake_case : int = x
snake_case : List[Any] = self.conv_in(__A )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A ):
def custom_forward(*A ):
return module(*__A )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__A ) , __A , use_reentrant=__A )
# middle
snake_case : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __A , use_reentrant=__A )
else:
for down_block in self.down_blocks:
snake_case : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__A ) , __A )
# middle
snake_case : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __A )
else:
# down
for down_block in self.down_blocks:
snake_case : int = down_block(__A )
# middle
snake_case : Optional[Any] = self.mid_block(__A )
# post-process
snake_case : Any = self.conv_norm_out(__A )
snake_case : Union[str, Any] = self.conv_act(__A )
snake_case : Union[str, Any] = self.conv_out(__A )
return sample
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self , A=3 , A=3 , A=("UpDecoderBlock2D",) , A=(6_4,) , A=2 , A=3_2 , A="silu" , A="group" , ) -> int:
super().__init__()
snake_case : List[str] = layers_per_block
snake_case : str = nn.Convad(
__A , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case : str = None
snake_case : int = nn.ModuleList([] )
snake_case : str = in_channels if norm_type == "spatial" else None
# mid
snake_case : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__A , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__A , temb_channels=__A , )
# up
snake_case : Union[str, Any] = list(reversed(__A ) )
snake_case : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__A ):
snake_case : str = output_channel
snake_case : int = reversed_block_out_channels[i]
snake_case : List[Any] = i == len(__A ) - 1
snake_case : Tuple = get_up_block(
__A , num_layers=self.layers_per_block + 1 , in_channels=__A , out_channels=__A , prev_output_channel=__A , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__A , resnet_groups=__A , attention_head_dim=__A , temb_channels=__A , resnet_time_scale_shift=__A , )
self.up_blocks.append(__A )
snake_case : Optional[Any] = output_channel
# out
if norm_type == "spatial":
snake_case : str = SpatialNorm(block_out_channels[0] , __A )
else:
snake_case : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__A , eps=1e-6 )
snake_case : List[Any] = nn.SiLU()
snake_case : List[Any] = nn.Convad(block_out_channels[0] , __A , 3 , padding=1 )
snake_case : Optional[int] = False
def UpperCAmelCase ( self , A , A=None ) -> Union[str, Any]:
snake_case : List[str] = z
snake_case : str = self.conv_in(__A )
snake_case : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A ):
def custom_forward(*A ):
return module(*__A )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __A , __A , use_reentrant=__A )
snake_case : int = sample.to(__A )
# up
for up_block in self.up_blocks:
snake_case : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(__A ) , __A , __A , use_reentrant=__A )
else:
# middle
snake_case : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __A , __A )
snake_case : List[Any] = sample.to(__A )
# up
for up_block in self.up_blocks:
snake_case : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__A ) , __A , __A )
else:
# middle
snake_case : Tuple = self.mid_block(__A , __A )
snake_case : Optional[int] = sample.to(__A )
# up
for up_block in self.up_blocks:
snake_case : Optional[int] = up_block(__A , __A )
# post-process
if latent_embeds is None:
snake_case : Tuple = self.conv_norm_out(__A )
else:
snake_case : Optional[Any] = self.conv_norm_out(__A , __A )
snake_case : Union[str, Any] = self.conv_act(__A )
snake_case : Any = self.conv_out(__A )
return sample
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self , A , A , A , A=None , A="random" , A=False , A=True ) -> Optional[Any]:
super().__init__()
snake_case : Dict = n_e
snake_case : Optional[Any] = vq_embed_dim
snake_case : List[Any] = beta
snake_case : str = legacy
snake_case : Union[str, Any] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case : Any = self.used.shape[0]
snake_case : str = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case : Union[str, Any] = self.re_embed
snake_case : Any = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
snake_case : str = n_e
snake_case : Tuple = sane_index_shape
def UpperCAmelCase ( self , A ) -> str:
snake_case : Optional[Any] = inds.shape
assert len(__A ) > 1
snake_case : int = inds.reshape(ishape[0] , -1 )
snake_case : Optional[int] = self.used.to(__A )
snake_case : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
snake_case : List[str] = match.argmax(-1 )
snake_case : Union[str, Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case : Union[str, Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case : Optional[Any] = self.unknown_index
return new.reshape(__A )
def UpperCAmelCase ( self , A ) -> Optional[int]:
snake_case : str = inds.shape
assert len(__A ) > 1
snake_case : Optional[int] = inds.reshape(ishape[0] , -1 )
snake_case : List[str] = self.used.to(__A )
if self.re_embed > self.used.shape[0]: # extra token
snake_case : List[str] = 0 # simply set to zero
snake_case : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __A )
return back.reshape(__A )
def UpperCAmelCase ( self , A ) -> int:
# reshape z -> (batch, height, width, channel) and flatten
snake_case : int = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case : Tuple = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case : List[str] = torch.argmin(torch.cdist(__A , self.embedding.weight ) , dim=1 )
snake_case : Optional[Any] = self.embedding(__A ).view(z.shape )
snake_case : List[str] = None
snake_case : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case : List[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case : Optional[int] = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case : str = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case : Tuple = self.remap_to_used(__A )
snake_case : Optional[int] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCAmelCase ( self , A , A ) -> Optional[Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case : Dict = self.unmap_to_all(__A )
snake_case : Dict = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case : Dict = self.embedding(__A )
if shape is not None:
snake_case : str = z_q.view(__A )
# reshape back to match original input shape
snake_case : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __lowercase (UpperCamelCase_ ):
"""simple docstring"""
def __init__( self , A , A=False ) -> Dict:
snake_case : Dict = parameters
snake_case : Optional[Any] = torch.chunk(__A , 2 , dim=1 )
snake_case : Any = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
snake_case : Optional[int] = deterministic
snake_case : Optional[Any] = torch.exp(0.5 * self.logvar )
snake_case : Optional[Any] = torch.exp(self.logvar )
if self.deterministic:
snake_case : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def UpperCAmelCase ( self , A = None ) -> Tuple:
# make sure sample is on the same device as the parameters and has same dtype
snake_case : List[Any] = randn_tensor(
self.mean.shape , generator=__A , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case : List[Any] = self.mean + self.std * sample
return x
def UpperCAmelCase ( self , A=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def UpperCAmelCase ( self , A , A=[1, 2, 3] ) -> str:
if self.deterministic:
return torch.Tensor([0.0] )
snake_case : Tuple = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
return self.mean
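# Note (added): DiagonalGaussianDistribution.sample above is the reparameterization
# trick: x = mean + std * eps with eps ~ N(0, I), so gradients flow through mean and
# logvar during VAE training while the randomness stays in eps.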
| 703 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(RuntimeError):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
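# Worked example (added): with num_shards=10 and max_num_jobs=3 the shards are split
# into contiguous, maximally even jobs of sizes 4 + 3 + 3, i.e.
# [range(0, 4), range(4, 7), range(7, 10)], matching the parametrized case above.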
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 684 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_snake_case = TextToVideoSDPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_snake_case = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def UpperCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
snake_case : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=3_2 , attention_head_dim=4 , )
snake_case : Tuple = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
snake_case : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case : List[Any] = CLIPTextModel(_a )
snake_case : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCAmelCase ( self , A , A=0 ) -> str:
if str(_a ).startswith("""mps""" ):
snake_case : Dict = torch.manual_seed(_a )
else:
snake_case : str = torch.Generator(device=_a ).manual_seed(_a )
snake_case : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : str = self.get_dummy_components()
snake_case : Optional[Any] = TextToVideoSDPipeline(**_a )
snake_case : str = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
snake_case : Optional[Any] = self.get_dummy_inputs(_a )
snake_case : int = """np"""
snake_case : List[str] = sd_pipe(**_a ).frames
snake_case : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
snake_case : List[Any] = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> Dict:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self ) -> int:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> str:
return super().test_progress_bar()
@slow
@skip_mps
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
snake_case : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
snake_case : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
snake_case : List[Any] = pipe.to("""cuda""" )
snake_case : Optional[Any] = """Spiderman is surfing"""
snake_case : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : List[str] = pipe(_a , generator=_a , num_inference_steps=2_5 , output_type="""pt""" ).frames
snake_case : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
snake_case : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
snake_case : Any = pipe.to("""cuda""" )
snake_case : Any = """Spiderman is surfing"""
snake_case : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : List[str] = pipe(_a , generator=_a , num_inference_steps=2 , output_type="""pt""" ).frames
snake_case : List[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for GPT-NeoX Japanese models."""

    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
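# Minimal usage sketch (added): the defaults above can be overridden per model, e.g.
#     config = GPTNeoXJapaneseConfig(num_hidden_layers=4)
#     assert config.hidden_size == 2560 and config.num_hidden_layers == 4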
| 684 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    # ds_b: digit sum of the "upper" digits b; c: numeric value of the lower k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        _diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
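    # Worked example (added): with a(0) = 1 and a(n + 1) = a(n) + digitsum(a(n)),
    # the sequence begins 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...; the memoized
    # "jumps" above skip through long stretches of such additions at once.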
| 706 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits are the binary form."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
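    # Worked examples (added): 0xAC is 172 decimal, i.e. 10101100 in binary, so the
    # function returns the integer 10101100.
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-AC") == -10101100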
| 684 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> Tuple:
snake_case : Optional[int] = full_name.split("""conv_layers.""" )[-1]
snake_case : List[Any] = name.split(""".""" )
snake_case : Optional[Any] = int(items[0] )
snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case : Tuple = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
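# Note added for clarity (layout assumed from the fairseq feature extractor): within
# each "conv_layers.<layer_id>.<type_id>..." key, type_id 0 names the convolution
# itself and type_id 2 its layer/group norm, which is what the branches above
# dispatch on; anything else falls through to unused_weights.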
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,lowercase=None ,) -> str:
if config_path is not None:
snake_case : List[str] = SpeechTaConfig.from_pretrained(__snake_case )
else:
snake_case : Union[str, Any] = SpeechTaConfig()
if task == "s2t":
snake_case : Optional[int] = config.max_text_positions
snake_case : List[str] = SpeechTaForSpeechToText(__snake_case )
elif task == "t2s":
snake_case : Optional[int] = 1876
snake_case : Union[str, Any] = 600
snake_case : str = config.max_speech_positions
snake_case : Optional[Any] = SpeechTaForTextToSpeech(__snake_case )
elif task == "s2s":
snake_case : List[Any] = 1876
snake_case : Tuple = config.max_speech_positions
snake_case : List[str] = SpeechTaForSpeechToSpeech(__snake_case )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
snake_case : str = SpeechTaTokenizer(__snake_case ,model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
snake_case : Dict = AddedToken("""<mask>""" ,lstrip=__snake_case ,rstrip=__snake_case )
snake_case : int = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
snake_case : str = SpeechTaFeatureExtractor()
snake_case : Any = SpeechTaProcessor(tokenizer=__snake_case ,feature_extractor=__snake_case )
processor.save_pretrained(__snake_case )
snake_case : List[str] = torch.load(__snake_case )
recursively_load_weights(fairseq_checkpoint["""model"""] ,__snake_case ,__snake_case )
model.save_pretrained(__snake_case )
if repo_id:
print("""Pushing to the hub...""" )
processor.push_to_hub(__snake_case )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
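# Hedged usage sketch (script name and paths are placeholders, not from the source):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts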
| code_codestyle = 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
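# --- hedged usage sketch (not in the original file) -------------------------------
# With the defaults above (resize to 256x256, center-crop to 224x224, rescale by
# 1/255, normalize with the ImageNet mean/std), preprocessing one image yields a
# (1, 3, 224, 224) batch. Hypothetical driver, using a placeholder for the class:
#
#   processor = <this image processor class>()
#   image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)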
| style_context_codestyle = 684 | label = 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=False ,lowercase=False ,lowercase=False ) -> Dict:
snake_case : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple:
for i in range(config.num_hidden_layers ):
snake_case : Union[str, Any] = """vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case : Tuple = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
snake_case : Optional[Any] = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case : List[Any] = in_proj_weight[
: config.hidden_size, :
]
snake_case : List[str] = in_proj_bias[: config.hidden_size]
snake_case : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case : Any = in_proj_weight[
-config.hidden_size :, :
]
snake_case : Dict = in_proj_bias[-config.hidden_size :]
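# --- hedged example (not in the original script) ---------------------------------
# Timm-style checkpoints fuse the attention projections into a single
# (3 * hidden, hidden) matrix; the slicing above carves it into query/key/value
# thirds, in that order:
import torch

_hidden = 4
_in_proj_weight = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q_w = _in_proj_weight[:_hidden, :]               # first third  -> query
_k_w = _in_proj_weight[_hidden : 2 * _hidden, :]  # middle third -> key
_v_w = _in_proj_weight[-_hidden:, :]              # last third   -> value
assert torch.equal(torch.cat([_q_w, _k_w, _v_w]), _in_proj_weight)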
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
snake_case : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ ,UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
snake_case : Optional[Any] = dct.pop(UpperCAmelCase__ )
snake_case : Optional[Any] = val
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[str]:
snake_case : int = ViltConfig(image_size=384 ,patch_size=32 ,tie_word_embeddings=UpperCAmelCase__ )
snake_case : str = False
snake_case : str = False
snake_case : str = False
snake_case : Optional[int] = False
if "vqa" in checkpoint_url:
snake_case : int = True
snake_case : Optional[Any] = 3129
snake_case : int = """huggingface/label-files"""
snake_case : Dict = """vqa2-id2label.json"""
snake_case : Dict = json.load(open(hf_hub_download(UpperCAmelCase__ ,UpperCAmelCase__ ,repo_type="""dataset""" ) ,"""r""" ) )
snake_case : Optional[int] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
snake_case : Optional[Any] = idalabel
snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
snake_case : List[Any] = ViltForQuestionAnswering(UpperCAmelCase__ )
elif "nlvr" in checkpoint_url:
snake_case : Any = True
snake_case : Tuple = 2
snake_case : Optional[int] = {0: """False""", 1: """True"""}
snake_case : Union[str, Any] = {v: k for k, v in config.idalabel.items()}
snake_case : List[str] = 3
snake_case : Tuple = ViltForImagesAndTextClassification(UpperCAmelCase__ )
elif "irtr" in checkpoint_url:
snake_case : Optional[int] = True
snake_case : Optional[int] = ViltForImageAndTextRetrieval(UpperCAmelCase__ )
elif "mlm_itm" in checkpoint_url:
snake_case : Optional[int] = True
snake_case : Tuple = ViltForMaskedLM(UpperCAmelCase__ )
else:
raise ValueError("""Unknown model type""" )
# load state_dict of original model, remove and rename some keys
snake_case : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase__ ,map_location="""cpu""" )["""state_dict"""]
snake_case : int = create_rename_keys(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ ,UpperCAmelCase__ )
if mlm_model or irtr_model:
snake_case : Union[str, Any] = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ ,UpperCAmelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
snake_case : Tuple = model.load_state_dict(UpperCAmelCase__ ,strict=UpperCAmelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCAmelCase__ )
# Define processor
snake_case : Optional[Any] = ViltImageProcessor(size=384 )
snake_case : Optional[int] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case : Any = ViltProcessor(UpperCAmelCase__ ,UpperCAmelCase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
snake_case : int = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" ,stream=UpperCAmelCase__ ).raw )
snake_case : Optional[int] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" ,stream=UpperCAmelCase__ ).raw )
snake_case : Optional[int] = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
snake_case : int = processor(UpperCAmelCase__ ,UpperCAmelCase__ ,return_tensors="""pt""" )
snake_case : Dict = processor(UpperCAmelCase__ ,UpperCAmelCase__ ,return_tensors="""pt""" )
snake_case : Any = model(
input_ids=encoding_a.input_ids ,pixel_values=encoding_a.pixel_values ,pixel_values_a=encoding_a.pixel_values ,)
else:
snake_case : Any = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,stream=UpperCAmelCase__ ).raw )
if mlm_model:
snake_case : str = """a bunch of [MASK] laying on a [MASK]."""
else:
snake_case : List[str] = """How many cats are there?"""
snake_case : Union[str, Any] = processor(UpperCAmelCase__ ,UpperCAmelCase__ ,return_tensors="""pt""" )
snake_case : Dict = model(**UpperCAmelCase__ )
# Verify outputs
if mlm_model:
snake_case : List[str] = torch.Size([1, 11, 30522] )
snake_case : List[Any] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] ,UpperCAmelCase__ ,atol=1E-4 )
# verify masked token prediction equals "cats"
snake_case : Any = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
snake_case : Any = torch.Size([1, 3129] )
snake_case : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] ,UpperCAmelCase__ ,atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
snake_case : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
snake_case : List[Any] = torch.Size([1, 2] )
snake_case : List[Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] ,UpperCAmelCase__ ,atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase : int = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
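# Hedged usage sketch (script name assumed; the URL is this script's own default):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm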
| code_codestyle = 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
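# Hedged note (inferred from the remapping above): the deps table is keyed by pip
# package name, while dummy objects record backends by import name, hence the
# "k_diffusion" -> "k-diffusion" and "invisible_watermark" -> "invisible-watermark"
# translation before the membership check.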
| style_context_codestyle = 684 | label = 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowercase (a__ ):
"""simple docstring"""
@slow
@require_torch
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case : str = bertabert.config.encoder.vocab_size
snake_case : Optional[int] = tokenizer.sep_token_id
snake_case : Any = tokenizer.cls_token_id
snake_case : str = 1_2_8
snake_case : str = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case : List[str] = train_dataset.select(range(3_2 ) )
snake_case : List[Any] = val_dataset.select(range(1_6 ) )
snake_case : List[str] = 4
def _map_to_encoder_decoder_inputs(A ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case : List[str] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=5_1_2 )
snake_case : List[str] = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=1_2_8 )
snake_case : str = inputs.input_ids
snake_case : Tuple = inputs.attention_mask
snake_case : Optional[Any] = outputs.input_ids
snake_case : List[str] = outputs.input_ids.copy()
snake_case : Union[str, Any] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
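# -100 is the default ignore_index of PyTorch's CrossEntropyLoss, so padded label
# positions never contribute to the training loss.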
snake_case : str = outputs.attention_mask
assert all(len(lowerCAmelCase__ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(lowerCAmelCase__ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(A ):
snake_case : List[str] = pred.label_ids
snake_case : str = pred.predictions
# all unnecessary tokens are removed
snake_case : Union[str, Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case : Optional[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase__ ) )] ) / len(lowerCAmelCase__ )
return {"accuracy": accuracy}
# map train dataset
snake_case : Optional[int] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case : int = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case : int = self.get_auto_remove_tmp_dir()
snake_case : int = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase__ , per_device_train_batch_size=lowerCAmelCase__ , per_device_eval_batch_size=lowerCAmelCase__ , predict_with_generate=lowerCAmelCase__ , evaluation_strategy="""steps""" , do_train=lowerCAmelCase__ , do_eval=lowerCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case : Optional[Any] = SeqaSeqTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
# start training
trainer.train()
| code_codestyle = 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
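# Hedged note: find_labels inspects the model class's forward/call signature for
# label-like parameter names, which is why the PT/TF heads above report "labels"
# (or "start_positions"/"end_positions" for QA) while Flax modules, whose __call__
# takes no label arguments, yield an empty list.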
| style_context_codestyle = 684 | label = 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCamelCase : str = logging.getLogger(__name__)
class __lowercase (UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A=None ) -> Optional[int]:
super().__init__(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
snake_case : List[Any] = None
def UpperCAmelCase ( self , A ) -> Union[str, Any]:
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
snake_case : Dict = self._infer_socket_ifname()
# avoid clash with the NCCL port
snake_case : int = str(distributed_port + 1 )
snake_case : Optional[int] = dist.new_group(ranks=__lowerCAmelCase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCAmelCase ( self ) -> Union[str, Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCAmelCase ( self , A , A , A=torch.floataa ) -> Dict:
snake_case : Optional[int] = torch.empty(__lowerCAmelCase , dtype=__lowerCAmelCase )
dist.scatter(__lowerCAmelCase , src=0 , scatter_list=__lowerCAmelCase , group=self.process_group )
return target_tensor
def UpperCAmelCase ( self ) -> List[str]:
snake_case : str = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
snake_case : Optional[Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __lowerCAmelCase )
return ifname
def UpperCAmelCase ( self , A , A ) -> Tuple[np.ndarray, List[dict]]:
if not dist.is_initialized():
snake_case , snake_case : List[Any] = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
# distributed training
snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
snake_case : Optional[Any] = None
if self._is_main():
snake_case : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCAmelCase )]
dist.gather(torch.tensor(__lowerCAmelCase ) , dst=0 , gather_list=__lowerCAmelCase , group=self.process_group )
# scatter logic
snake_case : Optional[int] = question_hidden_states.shape[0]
snake_case : Union[str, Any] = []
snake_case : Optional[int] = []
if self._is_main():
assert len(__lowerCAmelCase ) == world_size
snake_case , snake_case : List[Any] = self._main_retrieve(torch.cat(__lowerCAmelCase ).numpy() , __lowerCAmelCase )
snake_case , snake_case : List[Any] = torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase )
snake_case : List[str] = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
snake_case : List[str] = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
snake_case : Dict = self._scattered(__lowerCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
snake_case : Optional[Any] = self._scattered(__lowerCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCAmelCase )
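# --- hedged sketch (not in the original file) -------------------------------------
# retrieve() above follows the classic "gather all queries on rank 0, retrieve once,
# scatter results back" pattern. Its skeleton, reduced to one shape-preserving
# retrieval over equal per-rank batches (hypothetical helper):
def _gather_retrieve_scatter(query, retrieve_fn, group):
    import torch
    import torch.distributed as dist
    is_main = dist.get_rank(group=group) == 0
    world_size = dist.get_world_size(group=group)
    gather_list = [torch.empty_like(query) for _ in range(world_size)] if is_main else None
    dist.gather(query, gather_list=gather_list, dst=0, group=group)  # all ranks -> rank 0
    scatter_list = None
    if is_main:
        results = retrieve_fn(torch.cat(gather_list))  # one retrieval for every rank's queries
        scatter_list = list(results.chunk(world_size))
    out = torch.empty_like(query)
    dist.scatter(out, scatter_list=scatter_list, src=0, group=group)  # rank 0 -> all ranks
    return out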
| code_codestyle = 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
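# Hedged usage note: the defaults above mirror an AST-base setup (768-dim hidden
# states, 12 layers and heads, 16x16 patches over a 1024-frame x 128-mel
# spectrogram, frequency/time strides of 10), so instantiating the class with no
# arguments reproduces that configuration; model_type is
# "audio-spectrogram-transformer".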
| style_context_codestyle = 684 | label = 0 |