from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
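
# Added usage sketch (hedged; argument values are illustrative, not from the source).
# This is how the factory above is reached from the `transformers-cli` entry point:
#
#     parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
#     RunCommand.register_subcommand(parser.add_subparsers(help="transformers-cli command helpers"))
#     args = parser.parse_args(["run", "--task", "text-classification", "--input", "in.csv", "--format", "csv"])
#     args.func(args).run()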
import numpy as np
def tanh(vector: np.ndarray) -> np.ndarray:
    """Hyperbolic tangent via the identity tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1

if __name__ == "__main__":
    import doctest

    doctest.testmod()
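    # Added sanity check: the closed form above agrees with NumPy's built-in tanh.
    xs = np.linspace(-3.0, 3.0, 7)
    assert np.allclose(tanh(xs), np.tanh(xs))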
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Build the reversible byte -> printable-unicode-character map used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
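
# Added example: `bytes_to_unicode` produces a 256-entry byte->printable-character
# alphabet, and `get_pairs` lists the adjacent symbol pairs BPE considers merging:
#
#     len(bytes_to_unicode())    # 256
#     get_pairs(tuple("hello"))  # {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}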
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
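
# Added smoke test with a toy two-symbol vocabulary (assumption: a tiny vocab/merges
# pair written to a temp dir; this is not the real Blenderbot-3B vocabulary).
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, "vocab.json"), "w", encoding="utf-8") as f:
            json.dump({"<s>": 0, "</s>": 1, "<unk>": 2, "<pad>": 3, "<mask>": 4, "h": 5, "e": 6, "he": 7}, f)
        with open(os.path.join(tmp, "merges.txt"), "w", encoding="utf-8") as f:
            f.write("#version: 0.2\nh e\n")
        tok = BlenderbotTokenizer(os.path.join(tmp, "vocab.json"), os.path.join(tmp, "merges.txt"))
        assert tok.tokenize("he") == ["he"]  # the single BPE merge h+e fires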
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
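
# Added note: with `_LazyModule` wired up as above, importing the package is cheap and
# the heavy torch/TF submodules load only on first attribute access:
#
#     from transformers.models import vit_mae   # no torch import yet
#     vit_mae.ViTMAEConfig                      # now configuration_vit_mae is imported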
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
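
# Added usage sketch (hedged; "facebook/rag-token-nq" is the checkpoint used in the RAG
# docs, and `generated_ids` would come from a RagTokenForGeneration.generate call):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")
#     answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)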
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
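
# Added usage sketch (hedged; downloads default checkpoints from the Hub):
#
#     from transformers import pipeline
#     summarizer = pipeline("summarization")              # SummarizationPipeline
#     summarizer("Very long article ...", max_length=60)  # [{"summary_text": "..."}]
#     translator = pipeline("translation_en_to_fr")       # TranslationPipeline; the 4-part
#     translator("How old are you?")                      # task name feeds src/tgt above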
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
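
# Added round-trip sketch (hedged; requires the `sentencepiece` extra and Hub access):
#
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#     ids = tok("Hello world!").input_ids        # character-level pieces + final </s>
#     tok.decode(ids, skip_special_tokens=True)  # "Hello world!"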
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
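
# Added example (hedged): with the defaults above,
#
#     config = GPTNeoXJapaneseConfig()
#     config.hidden_size                                  # 2560
#     GPTNeoXJapaneseConfig.from_dict(config.to_dict())   # round-trips losslessly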
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits spell its binary form."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
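
# Added worked examples: int("AC", 16) == 172 == 0b10101100, so the decimal digits of
# the return value spell out the binary expansion.
assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-fc") == -11111100
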
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import tempfile
import torch
from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
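# Editor's sketch, not part of the original row: a minimal usage example. It assumes the class
# above behaves like a standard Hugging Face image processor (so __call__ dispatches to the
# preprocessing method); `processor` and `image` are illustrative names.
import numpy as np
processor = __lowercase()  # defaults: resize to 256x256, center-crop to 224x224, rescale by 1/255
image = np.random.randint(0 , 2_5_6 , (4_8_0, 6_4_0, 3) , dtype=np.uint8 )
batch = processor(images=image )
print(batch["""pixel_values"""][0].shape )  # (3, 224, 224), channels-first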
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase = 600851475143 ) -> int:
try:
snake_case : Optional[int] = int(lowercase )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
snake_case : str = 2
snake_case : str = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
snake_case : Any = i
while n % i == 0:
snake_case : Dict = n // i
i += 1
return int(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class DummyModel (BertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class DummyModel (TFBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
class DummyModel (FlaxBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , [] )
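# Editor's sketch, not part of the original row: conceptually, find_labels inspects the signature
# of the model's forward/call entry point and keeps the label-like parameters. A rough pure-Python
# approximation (`sketch_find_labels` is an illustrative name):
import inspect
def sketch_find_labels(model_class ):
    signature = inspect.signature(model_class.forward )
    if "QuestionAnswering" in model_class.__name__:
        return [p for p in signature.parameters if """label""" in p or p in ("""start_positions""", """end_positions""")]
    return [p for p in signature.parameters if """label""" in p]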
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class DummyModel (BertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class DummyModel (TFBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
class DummyModel (FlaxBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(DummyModel ) , [] )
| 684 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model'}
lowerCamelCase : str = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
lowerCamelCase : List[str] = {
'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A=False , A=False , A=False , A=None , A=None , A=None , A=None , A = None , **A , ) -> None:
snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case : int = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
snake_case : Any = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
snake_case : Tuple = """<|endoftext|>""" if eos_token is None else eos_token
snake_case : List[Any] = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
snake_case : List[str] = unk_token if pad_token is None else pad_token
snake_case : Tuple = eos_token if bos_token is None else bos_token
else:
snake_case : Optional[int] = """<pad>""" if pad_token is None else pad_token
snake_case : str = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Dict = do_lower_case
snake_case : List[str] = remove_space
snake_case : Union[str, Any] = keep_accents
snake_case : List[str] = vocab_file
snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
# Used for whitespace normalization in input texts
# fmt: off
snake_case : Dict = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
snake_case : Union[str, Any] = re.compile(
f"""[{"".join(map(A , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Any:
snake_case : Any = self.__dict__.copy()
snake_case : Dict = None
return state
def __setstate__( self , A ) -> List[str]:
snake_case : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : str = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase ( self ) -> int:
return len(self.sp_model )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Union[str, Any] = self.non_printing_characters_re.sub("""""" , A )
# Normalize whitespaces
snake_case : int = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
snake_case : str = unicodedata.normalize("""NFC""" , A )
return text
def UpperCAmelCase ( self , A , **A ) -> List[str]:
snake_case : Union[str, Any] = self.preprocess_text(A )
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> int:
return self.sp_model.PieceToId(A )
def UpperCAmelCase ( self , A ) -> str:
return self.sp_model.IdToPiece(A )
@staticmethod
def UpperCAmelCase ( A ) -> str:
return out_string
def UpperCAmelCase ( self , A ) -> str:
snake_case : int = []
snake_case : str = """"""
snake_case : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
snake_case : str = True
snake_case : Dict = []
else:
current_sub_tokens.append(token )
snake_case : Any = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string
def UpperCAmelCase ( self ) -> Dict[str, int]:
snake_case : int = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : List[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Any = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCAmelCase ( self , A , A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(A , A ):
snake_case : int = self.preprocess_text(A )
snake_case : str = self.sp_model.encode(A )
else:
snake_case : Optional[int] = [self.preprocess_text(A ) for t in text]
snake_case : Any = self.sp_model.encode(A )
if return_tensors is True or return_tensors == "pt":
snake_case : Any = torch.tensor(token_ids )
return token_ids
def UpperCAmelCase ( self , A ) -> str:
return self.sp_model.decode(A )
def UpperCAmelCase ( self , A ) -> List[int]:
snake_case : List[Any] = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
snake_case : int = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=A )
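# Editor's sketch, not part of the original row: a typical round-trip, assuming the checkpoint
# names listed above are reachable and that AutoTokenizer dispatches to this tokenizer class.
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained("""AI-Sweden/gpt-sw3-126m""" )
ids = tok("""Träd är fina""" , return_tensors="""pt""" )["""input_ids"""]
print(tok.decode(ids[0] ) )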
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(i ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
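# Editor's sketch, not part of the original row: the three subclasses above back the usual
# factory entry points; the default checkpoints are assumed to be downloadable.
from transformers import pipeline
summarizer = pipeline("""summarization""" )
print(summarizer("""Long input document ...""" , min_length=5 , max_length=2_0 ) )
translator = pipeline("""translation_en_to_fr""" )
print(translator("""How old are you?""" ) )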
| 684 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
assert column_title.isupper()
snake_case : List[str] = 0
snake_case : Tuple = len(lowercase ) - 1
snake_case : Any = 0
while index >= 0:
snake_case : Optional[int] = (ord(column_title[index] ) - 64) * pow(26 ,power )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
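# Editor's sketch, not part of the original row: the loop is a base-26 conversion with "A" = 1,
# so "AB" = 1 * 26**1 + 2 * 26**0 = 28.
assert SCREAMING_SNAKE_CASE__("""A""" ) == 1
assert SCREAMING_SNAKE_CASE__("""AB""" ) == 2_8
assert SCREAMING_SNAKE_CASE__("""ZZ""" ) == 7_0_2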
| 712 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 0 |
from collections.abc import Callable
class __lowercase :
"""simple docstring"""
def __init__( self , A = None ) -> None:
# Stores actual heap items.
snake_case : list = []
# Stores indexes of each item for supporting updates and deletion.
snake_case : dict = {}
# Stores current size of heap.
snake_case : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
snake_case : Any = key or (lambda x : x)
def UpperCAmelCase ( self , A ) -> int | None:
return int((i - 1) / 2 ) if i > 0 else None
def UpperCAmelCase ( self , A ) -> int | None:
snake_case : List[Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def UpperCAmelCase ( self , A ) -> int | None:
snake_case : Union[str, Any] = int(2 * i + 2 )
return right if 0 < right < self.size else None
def UpperCAmelCase ( self , A , A ) -> None:
snake_case , snake_case : Optional[Any] = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
snake_case , snake_case : List[Any] = self.arr[j], self.arr[i]
def UpperCAmelCase ( self , A , A ) -> bool:
return self.arr[i][1] < self.arr[j][1]
def UpperCAmelCase ( self , A ) -> int:
snake_case : Dict = self._left(A )
snake_case : Optional[Any] = self._right(A )
snake_case : str = i
if left is not None and not self._cmp(A , A ):
snake_case : str = left
if right is not None and not self._cmp(A , A ):
snake_case : Optional[Any] = right
return valid_parent
def UpperCAmelCase ( self , A ) -> None:
snake_case : List[Any] = self._parent(A )
while parent is not None and not self._cmp(A , A ):
self._swap(A , A )
snake_case , snake_case : Optional[Any] = parent, self._parent(A )
def UpperCAmelCase ( self , A ) -> None:
snake_case : Any = self._get_valid_parent(A )
while valid_parent != index:
self._swap(A , A )
snake_case , snake_case : Union[str, Any] = valid_parent, self._get_valid_parent(A )
def UpperCAmelCase ( self , A , A ) -> None:
if item not in self.pos_map:
return
snake_case : Dict = self.pos_map[item]
snake_case : Tuple = [item, self.key(A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(A )
self._heapify_down(A )
def UpperCAmelCase ( self , A ) -> None:
if item not in self.pos_map:
return
snake_case : Optional[int] = self.pos_map[item]
del self.pos_map[item]
snake_case : int = self.arr[self.size - 1]
snake_case : List[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(A )
self._heapify_down(A )
def UpperCAmelCase ( self , A , A ) -> None:
snake_case : Union[str, Any] = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(A )] )
else:
snake_case : List[Any] = [item, self.key(A )]
snake_case : List[Any] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def UpperCAmelCase ( self ) -> tuple | None:
return self.arr[0] if self.size else None
def UpperCAmelCase ( self ) -> tuple | None:
snake_case : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def SCREAMING_SNAKE_CASE__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
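# Editor's sketch, not part of the original row: the `key` callable flips the ordering, turning
# the same structure into a min- or max-heap. Readable method names (insert_item, get_top) are
# assumed here, since this dump collapses all method names.
min_heap = __lowercase()               # identity key -> min-heap
max_heap = __lowercase(lambda x : -x ) # negated key -> max-heap
for value in [5, 1, 4]:
    min_heap.insert_item(value , value )
    max_heap.insert_item(value , value )
print(min_heap.get_top() )  # [1, 1]
print(max_heap.get_top() )  # [5, -5]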
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 714 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : str = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """swin2sr"""
_snake_case = {
"""hidden_size""": """embed_dim""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , A=6_4 , A=1 , A=3 , A=1_8_0 , A=[6, 6, 6, 6, 6, 6] , A=[6, 6, 6, 6, 6, 6] , A=8 , A=2.0 , A=True , A=0.0 , A=0.0 , A=0.1 , A="gelu" , A=False , A=0.02 , A=1e-5 , A=2 , A=1.0 , A="1conv" , A="pixelshuffle" , **A , ) -> Optional[Any]:
super().__init__(**A )
snake_case : int = image_size
snake_case : Tuple = patch_size
snake_case : Union[str, Any] = num_channels
snake_case : str = embed_dim
snake_case : List[str] = depths
snake_case : Dict = len(A )
snake_case : Any = num_heads
snake_case : List[str] = window_size
snake_case : Tuple = mlp_ratio
snake_case : int = qkv_bias
snake_case : int = hidden_dropout_prob
snake_case : List[str] = attention_probs_dropout_prob
snake_case : int = drop_path_rate
snake_case : List[Any] = hidden_act
snake_case : List[Any] = use_absolute_embeddings
snake_case : List[Any] = layer_norm_eps
snake_case : Tuple = initializer_range
snake_case : Any = upscale
snake_case : Union[str, Any] = img_range
snake_case : Optional[Any] = resi_connection
snake_case : Tuple = upsampler
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(g ,2 ,lowercase ) == 1:
continue
if pow(g ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(p ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,p ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(e_a ,d ,p ) ,p )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
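# Editor's sketch, not part of the original row: textbook ElGamal with toy numbers, showing the
# role of the private exponent d. All values here are illustrative.
p, g, d = 4_6_7, 2, 1_5_3  # toy prime, generator, private key
h = pow(g , d , p )        # public component
m, k = 3_3_1, 1_9_7        # message and ephemeral key
c1, c2 = pow(g , k , p ), (m * pow(h , k , p )) % p
assert (c2 * pow(c1 , p - 1 - d , p )) % p == m  # c1^(p-1-d) inverts c1^d mod p by Fermat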
| 684 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self , A ) -> float:
return 0.0
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> tuple[int | float, int | float]:
snake_case : Any = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
snake_case : Optional[int] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
snake_case : Union[str, Any] = 512
snake_case : str = [1] + [0] * (size - 1)
snake_case : Optional[Any] = [filter_type.process(item ) for item in inputs]
snake_case : List[str] = [0] * (samplerate - size) # zero-padding
outputs += filler
snake_case : Union[str, Any] = np.abs(np.fft.fft(outputs ) )
snake_case : List[str] = 20 * np.logaa(fft_out )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 ,samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
snake_case : Optional[int] = get_bounds(fft_db ,samplerate )
plt.ylim(max([-80, bounds[0]] ) ,min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(fft_db )
plt.show()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
snake_case : Optional[int] = 512
snake_case : Any = [1] + [0] * (size - 1)
snake_case : Optional[Any] = [filter_type.process(item ) for item in inputs]
snake_case : str = [0] * (samplerate - size) # zero-padding
outputs += filler
snake_case : Any = np.angle(np.fft.fft(outputs ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 ,samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi ,2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(fft_out ,-2 * pi ) )
plt.show()
| 716 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(base ,exponent // 2 ,modulo_value ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(base ,exponent - 1 ,modulo_value )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,height ):
snake_case : List[str] = _modexpt(base ,result ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(pattern )
snake_case : Tuple = []
for i in range(len(s ) - pat_len + 1 ):
snake_case : str = True
for j in range(pat_len ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(i )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
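# Editor's sketch, not part of the original row: the scan is O(len(s) * len(pattern)) and
# reports overlapping occurrences.
assert naive_pattern_search("""AAA""" , """AA""" ) == [0, 1]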
| 717 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(min_face_number ,max_face_number + 1 )
for dice_numbers in product(face_numbers ,repeat=dice_number ):
snake_case : Any = sum(dice_numbers )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __lowercase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A=None , A=True , A=None , **A ) -> Optional[int]:
snake_case : Optional[Any] = parent
snake_case : Tuple = config_class
snake_case : Dict = has_text_modality
snake_case : Optional[Any] = kwargs
snake_case : List[Any] = common_properties
def UpperCAmelCase ( self ) -> Dict:
snake_case : Any = self.config_class(**self.inputs_dict )
snake_case : Optional[Any] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(A , A ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(A ):
try:
setattr(A , A , A )
self.parent.assertEqual(
getattr(A , A ) , A , msg=f"""`{name} value {idx} expected, but was {getattr(A , A )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(A ):
try:
snake_case : List[Any] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(A , A ) , A , msg=f"""`{name} value {idx} expected, but was {getattr(A , A )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase ( self ) -> Dict:
snake_case : List[str] = self.config_class(**self.inputs_dict )
snake_case : Optional[Any] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Optional[int] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Dict = os.path.join(A , """config.json""" )
config_first.to_json_file(A )
snake_case : Any = self.config_class.from_json_file(A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[int] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(A )
snake_case : List[Any] = self.config_class.from_pretrained(A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = self.config_class(**self.inputs_dict )
snake_case : Any = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : List[str] = os.path.join(A , A )
config_first.save_pretrained(A )
snake_case : Union[str, Any] = self.config_class.from_pretrained(A , subfolder=A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
snake_case : Any = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase ( self ) -> List[str]:
if self.config_class.is_composition:
return
snake_case : Optional[int] = self.config_class()
self.parent.assertIsNotNone(A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[Any] = copy.deepcopy(A )
snake_case : List[Any] = self.config_class(**A )
snake_case : Dict = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(A , A ) != value:
wrong_values.append((key, getattr(A , A ), value) )
if len(A ) > 0:
snake_case : Union[str, Any] = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def UpperCAmelCase ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 719 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
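
# Note: 17 == 20 - 4 + 1 is the number of starting offsets for a length-4
# window along a 20-cell row or column, and the "diagonal 2" scan starts at
# column 3 so that the j - 3 index stays inside the grid.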
if __name__ == "__main__":
print(solution())
| 684 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 720 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
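
# Usage sketch: the `swapped` flag gives an early exit, so already-sorted input
# costs one O(n) pass; e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5].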
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCamelCase : Optional[int] = pytest.mark.integration
@require_faiss
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def UpperCAmelCase ( self ) -> Any:
import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
def UpperCAmelCase ( self ) -> List[Any]:
import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def UpperCAmelCase ( self ) -> Any:
import faiss
        dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
def UpperCAmelCase ( self ) -> List[str]:
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
def UpperCAmelCase ( self ) -> Any:
from elasticsearch import Elasticsearch
        dset: Dataset = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
snake_case : int = {"""acknowledged""": True}
mocked_bulk.return_value([(True, None)] * 3_0 )
snake_case : List[str] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 2_9}]}}
snake_case : Dict = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=A )
snake_case : Optional[int] = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
def UpperCAmelCase ( self ) -> Tuple:
import faiss
        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
def UpperCAmelCase ( self ) -> Any:
import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
def UpperCAmelCase ( self ) -> List[Any]:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
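        # Note: with METRIC_INNER_PRODUCT, larger scores mean closer matches,
        # which is why these tests assert scores[0] > 0 rather than comparing
        # distances the way an L2 index would.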
@require_faiss
def test_serialization_fs(mockfs) -> None:
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self) -> dict:
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
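
# Usage sketch (hypothetical vocab paths; upstream this class is transformers'
# JukeboxTokenizer, shown here with a mangled name):
#   tokenizer = <tokenizer_class>("artists.json", "genres.json", "lyrics.json")
#   encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
#   encoding["input_ids"] then holds one tensor per version entry, per __call__ above.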
| 684 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCamelCase : Optional[Any] = 'pt'
elif is_tf_available():
lowerCamelCase : Dict = 'tf'
else:
lowerCamelCase : str = 'jax'
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer(self) -> ByT5Tokenizer:
        return ByT5Tokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase ( self , A , A=False , A=2_0 , A=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
snake_case : List[str] = []
for i in range(len(A ) ):
try:
snake_case : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case : List[Any] = list(filter(lambda A : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , A ) )
snake_case : Any = list(filter(lambda A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=A ) , A ) )
if max_length is not None and len(A ) > max_length:
snake_case : Dict = toks[:max_length]
if min_length is not None and len(A ) < min_length and len(A ) > 0:
while len(A ) < min_length:
snake_case : Any = toks + toks
# toks_str = [t[1] for t in toks]
snake_case : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
snake_case : List[str] = tokenizer.decode(A , clean_up_tokenization_spaces=A )
if " " not in output_txt and len(A ) > 1:
snake_case : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=A )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=A )
)
if with_prefix_space:
snake_case : Optional[int] = """ """ + output_txt
snake_case : Optional[Any] = tokenizer.encode(A , add_special_tokens=A )
return output_txt, output_ids
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = self.ta_base_tokenizer
snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
snake_case : Union[str, Any] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def UpperCAmelCase ( self ) -> str:
snake_case : Tuple = self.ta_base_tokenizer
snake_case : Optional[Any] = """Unicode €."""
snake_case : Any = tokenizer(A )
snake_case : Optional[int] = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded["""input_ids"""] , A )
# decoding
snake_case : List[Any] = tokenizer.decode(A )
self.assertEqual(A , """Unicode €.</s>""" )
snake_case : int = tokenizer("""e è é ê ë""" )
snake_case : List[str] = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded["""input_ids"""] , A )
# decoding
snake_case : Optional[Any] = tokenizer.decode(A )
self.assertEqual(A , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def UpperCAmelCase ( self ) -> str:
snake_case : Union[str, Any] = self.ta_base_tokenizer
snake_case : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
snake_case : int = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
snake_case : str = tokenizer(A , padding=A , return_tensors=A )
self.assertIsInstance(A , A )
if FRAMEWORK != "jax":
snake_case : List[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(A , A )
self.assertEqual((2, 3_7) , batch.input_ids.shape )
self.assertEqual((2, 3_7) , batch.attention_mask.shape )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[str] = self.ta_base_tokenizer
snake_case : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
snake_case : List[str] = tokenizer(A , padding=A , return_tensors=A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , A )
self.assertIn("""attention_mask""" , A )
self.assertNotIn("""decoder_input_ids""" , A )
self.assertNotIn("""decoder_attention_mask""" , A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[str] = self.ta_base_tokenizer
snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
snake_case : Tuple = tokenizer(
text_target=A , max_length=3_2 , padding="""max_length""" , truncation=A , return_tensors=A )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : List[Any] = self.ta_base_tokenizer
snake_case : Optional[Any] = ["""A long paragraph for summarization. </s>"""]
snake_case : List[Any] = ["""Summary of the text. </s>"""]
# fmt: off
snake_case : Dict = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
snake_case : List[Any] = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
snake_case : Dict = tokenizer(A , text_target=A )
self.assertEqual(A , batch["""input_ids"""][0] )
self.assertEqual(A , batch["""labels"""][0] )
def UpperCAmelCase ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
snake_case : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case : Dict = tempfile.mkdtemp()
snake_case : str = """ He is very happy, UNwant\u00E9d,running"""
snake_case : Dict = tokenizer.encode(A , add_special_tokens=A )
tokenizer.save_pretrained(A )
snake_case : List[Any] = tokenizer.__class__.from_pretrained(A )
snake_case : Tuple = after_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
shutil.rmtree(A )
snake_case : Tuple = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case : List[str] = tempfile.mkdtemp()
snake_case : Any = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
snake_case : Any = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
snake_case : Tuple = tokenizer.encode(A , add_special_tokens=A )
tokenizer.save_pretrained(A )
snake_case : Tuple = tokenizer.__class__.from_pretrained(A )
snake_case : int = after_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
snake_case : Optional[int] = tokenizer.__class__.from_pretrained(A , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(A )
with open(os.path.join(A , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
snake_case : Union[str, Any] = json.load(A )
with open(os.path.join(A , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
snake_case : Dict = json.load(A )
snake_case : int = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
snake_case : Any = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(A , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(A , A )
with open(os.path.join(A , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(A , A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case : Tuple = tokenizer_class.from_pretrained(
A , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=A )]
snake_case : List[Any] = tokenizer_class.from_pretrained(
A , additional_special_tokens=A , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def UpperCAmelCase ( self ) -> Any:
snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(A )
snake_case : Any = tokenizer_class.from_pretrained(A )
self.assertTrue(tokenizer.decode([2_5_5] ) == """""" )
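                # ByT5 has no learned vocabulary: after the few special tokens,
                # IDs map directly onto raw bytes, so a lone high byte decodes
                # to invalid UTF-8 and is dropped, yielding the empty string.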
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> Any:
pass
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> Tuple:
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
snake_case : Optional[Any] = self.get_tokenizers(fast=A , do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
snake_case : Optional[Any] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
snake_case : Optional[Any] = tokenizer.convert_tokens_to_string(A )
self.assertIsInstance(A , A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
snake_case : Union[str, Any] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
snake_case : Optional[int] = 0
snake_case : Tuple = tokenizer.convert_ids_to_tokens(
A , skip_special_tokens=A )
for attr in attributes_list:
setattr(A , attr + """_id""" , A )
self.assertEqual(getattr(A , A ) , A )
self.assertEqual(getattr(A , attr + """_id""" ) , A )
setattr(A , attr + """_id""" , A )
self.assertEqual(getattr(A , A ) , A )
self.assertEqual(getattr(A , attr + """_id""" ) , A )
setattr(A , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(A , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(A , """additional_special_tokens_ids""" ) , [] )
setattr(A , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(A , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(A , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 700 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
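
# Complexity note: the nested scan above is O(len(s) * len(pattern)) in the
# worst case (e.g. s = "AAAA...A", pattern = "AA...AB"); KMP or the
# Z-algorithm reduce pattern search to O(len(s) + len(pattern)).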
| 684 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('fixtures')
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
snake_case : Optional[Any] = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> str:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def UpperCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
def UpperCAmelCase ( self ) -> str:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
def UpperCAmelCase ( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 701 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
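
# The line above uses the identity tanh(x) = 2 * sigmoid(2x) - 1 with
# sigmoid(t) = 1 / (1 + exp(-t)); e.g. tangent_hyperbolic(np.array([0.0, 1.0]))
# gives approximately array([0., 0.76159416]).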
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
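
# Each value lands in bucket int(v - min_value); with bucket_count equal to
# int(max_value - min_value) + 1, every bucket covers one unit of the value
# range, so sorting each bucket and concatenating yields a fully sorted list.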
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
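
# _LazyModule defers the heavy torch/TF imports until an attribute of this
# module is first accessed, keeping the initial `import transformers` fast
# even when the optional backends are installed.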
| 684 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """xmod"""
def __init__( self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1e-1_2 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , A=False , A=2 , A=False , A=True , A=True , A=("en_XX",) , A=None , **A , ) -> str:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
snake_case : str = vocab_size
snake_case : List[Any] = hidden_size
snake_case : str = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : List[str] = hidden_act
snake_case : str = intermediate_size
snake_case : List[str] = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : Optional[int] = max_position_embeddings
snake_case : Dict = type_vocab_size
snake_case : str = initializer_range
snake_case : List[str] = layer_norm_eps
snake_case : str = position_embedding_type
snake_case : List[Any] = use_cache
snake_case : Dict = classifier_dropout
snake_case : Union[str, Any] = pre_norm
snake_case : Union[str, Any] = adapter_reduction_factor
snake_case : Optional[int] = adapter_layer_norm
snake_case : str = adapter_reuse_layer_norm
snake_case : Tuple = ln_before_adapter
snake_case : List[Any] = list(A )
snake_case : Tuple = default_language
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
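
# For ONNX export, the "batch" axis and the trailing "sequence"/"choice" axes
# are declared dynamic above, so the exported graph accepts variable batch
# sizes and sequence lengths at inference time.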
| 703 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=True , A=False , A=False , A=False , A=2 , A=9_9 , A=0 , A=3_2 , A=5 , A=4 , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=2 , A=4 , A="last" , A=True , A=None , A=0 , ) -> Union[str, Any]:
snake_case : Optional[Any] = parent
snake_case : Dict = batch_size
snake_case : List[str] = seq_length
snake_case : str = is_training
snake_case : str = use_input_lengths
snake_case : List[Any] = use_token_type_ids
snake_case : Optional[int] = use_labels
snake_case : Optional[int] = gelu_activation
snake_case : Dict = sinusoidal_embeddings
snake_case : Any = causal
snake_case : str = asm
snake_case : Dict = n_langs
snake_case : Optional[Any] = vocab_size
snake_case : Any = n_special
snake_case : List[str] = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : List[Any] = hidden_dropout_prob
snake_case : Dict = attention_probs_dropout_prob
snake_case : List[Any] = max_position_embeddings
snake_case : Optional[Any] = type_sequence_label_size
snake_case : Any = initializer_range
snake_case : List[str] = num_labels
snake_case : str = num_choices
snake_case : Union[str, Any] = summary_type
snake_case : Optional[int] = use_proj
snake_case : List[Any] = scope
snake_case : List[str] = bos_token_id
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Any = None
if self.use_input_lengths:
snake_case : Tuple = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case : Any = None
if self.use_token_type_ids:
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case : Optional[int] = None
snake_case : List[Any] = None
snake_case : Dict = None
if self.use_labels:
snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : List[str] = ids_tensor([self.batch_size] , 2 ).float()
snake_case : Any = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self ) -> int:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> List[str]:
snake_case : List[Any] = XLMModel(config=A )
model.to(A )
model.eval()
snake_case : str = model(A , lengths=A , langs=A )
snake_case : Optional[Any] = model(A , langs=A )
snake_case : str = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Any:
snake_case : str = XLMWithLMHeadModel(A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Union[str, Any]:
snake_case : List[str] = XLMForQuestionAnsweringSimple(A )
model.to(A )
model.eval()
snake_case : Tuple = model(A )
snake_case : List[str] = model(A , start_positions=A , end_positions=A )
snake_case : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
snake_case : Any = XLMForQuestionAnswering(A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A )
snake_case : Dict = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , p_mask=A , )
snake_case : List[Any] = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , )
        (total_loss,) = result_with_labels.to_tuple()
snake_case : int = model(A , start_positions=A , end_positions=A )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
snake_case : Any = XLMForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : Optional[Any] = model(A )
snake_case : Optional[int] = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> int:
snake_case : Tuple = self.num_labels
snake_case : Any = XLMForTokenClassification(A )
model.to(A )
model.eval()
snake_case : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> str:
snake_case : Optional[Any] = self.num_choices
snake_case : Optional[Any] = XLMForMultipleChoice(config=A )
model.to(A )
model.eval()
snake_case : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Dict = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self , A , A , A=False ) -> int:
snake_case : Any = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
snake_case : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = XLMModelTester(self )
snake_case : int = ConfigTester(self , config_class=A , emb_dim=3_7 )
def UpperCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A )
def UpperCAmelCase ( self ) -> int:
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A )
def UpperCAmelCase ( self , A , A , A , A , A , A=False , A=1 ) -> List[Any]:
self.assertIsInstance(A , A )
self.assertListEqual(
[isinstance(A , A ) for iter_attentions in attentions] , [True] * len(A ) )
self.assertEqual(len(A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A ):
# adds PAD dummy token
snake_case : Tuple = min_length + idx + 1
snake_case : Dict = min_length + idx + 1
snake_case : List[str] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A ) )
def UpperCAmelCase ( self , A , A , A , A , A , A=False , A=1 ) -> str:
self.assertIsInstance(A , A )
self.assertListEqual(
[isinstance(A , A ) for iter_hidden_states in hidden_states] , [True] * len(A ) , )
self.assertEqual(len(A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A ):
# adds PAD dummy token
snake_case : Optional[Any] = min_length + idx + 1
snake_case : Tuple = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A ) , )
pass
@slow
def UpperCAmelCase ( self ) -> str:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : int = XLMModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : List[Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(A )
snake_case : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=A ) # the president
snake_case : int = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
snake_case : List[Any] = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A )
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
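# A minimal usage sketch for the SentencePiece tokenizer above (the class is the
# obfuscated counterpart of transformers' SpeechT5Tokenizer; the checkpoint name is
# taken from the vocab map at the top of the file):
#
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#   ids = tokenizer("hello world").input_ids   # char-level pieces, </s> appended
#   text = tokenizer.decode(ids, skip_special_tokens=True)  # should round-trip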
| 684 | 0 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax""", """transformers"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax""", """transformers"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax""", """transformers"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax""", """transformers"""] )
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
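# Quick sanity sketch for the config above (field names follow the original
# GPTNeoXJapaneseConfig; the values are illustrative, not a real checkpoint):
#
#   config = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4)
#   assert config.model_type == "gpt_neox_japanese"
#   assert config.rotary_emb_base == 10_000   # default rotary embedding base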
| 684 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowerCamelCase : Tuple = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]:
with open(lowercase ,"""rb""" ) as f:
snake_case : Optional[int] = Image.open(lowercase )
return im.convert("""RGB""" )
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = field(
default=UpperCamelCase__ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case = field(default=UpperCamelCase__ , metadata={"""help""": """A folder containing the training data."""} )
_snake_case = field(default=UpperCamelCase__ , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCAmelCase ( self ) -> Any:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase__ )} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case = field(default=UpperCamelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
snake_case : str = torch.stack([example["""pixel_values"""] for example in examples] )
snake_case : str = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" ,lowercase ,lowercase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case : List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
snake_case : Union[str, Any] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
snake_case : List[str] = {}
if data_args.train_dir is not None:
snake_case : int = os.path.join(data_args.train_dir ,"""**""" )
if data_args.validation_dir is not None:
snake_case : Optional[int] = os.path.join(data_args.validation_dir ,"""**""" )
snake_case : Optional[Any] = load_dataset(
"""imagefolder""" ,data_files=lowercase ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,)
# If we don't have a validation split, split off a percentage of train as validation.
snake_case : List[Any] = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,lowercase ) and data_args.train_val_split > 0.0:
snake_case : Optional[int] = dataset["""train"""].train_test_split(data_args.train_val_split )
snake_case : List[Any] = split["""train"""]
snake_case : int = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case : List[str] = dataset["""train"""].features["""labels"""].names
snake_case : Optional[int] = {}, {}
for i, label in enumerate(lowercase ):
snake_case : Dict = str(lowercase )
snake_case : Union[str, Any] = label
# Load the accuracy metric from the datasets package
snake_case : Dict = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase ):
return metric.compute(predictions=np.argmax(p.predictions ,axis=1 ) ,references=p.label_ids )
snake_case : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(lowercase ) ,labelaid=lowercase ,idalabel=lowercase ,finetuning_task="""image-classification""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
snake_case : Tuple = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
snake_case : List[str] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case : List[Any] = image_processor.size["""shortest_edge"""]
else:
snake_case : Dict = (image_processor.size["""height"""], image_processor.size["""width"""])
snake_case : List[str] = Normalize(mean=image_processor.image_mean ,std=image_processor.image_std )
snake_case : Dict = Compose(
[
RandomResizedCrop(lowercase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case : Optional[Any] = Compose(
[
Resize(lowercase ),
CenterCrop(lowercase ),
ToTensor(),
normalize,
] )
def train_transforms(lowercase ):
snake_case : Union[str, Any] = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(lowercase ):
snake_case : List[Any] = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
snake_case : Optional[Any] = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowercase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
snake_case : List[str] = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowercase )
    # Initialize our trainer
snake_case : Union[str, Any] = Trainer(
model=lowercase ,args=lowercase ,train_dataset=dataset["""train"""] if training_args.do_train else None ,eval_dataset=dataset["""validation"""] if training_args.do_eval else None ,compute_metrics=lowercase ,tokenizer=lowercase ,data_collator=lowercase ,)
# Training
if training_args.do_train:
snake_case : List[Any] = None
if training_args.resume_from_checkpoint is not None:
snake_case : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case : int = last_checkpoint
snake_case : Optional[Any] = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics("""train""" ,train_result.metrics )
trainer.save_metrics("""train""" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case : Any = trainer.evaluate()
trainer.log_metrics("""eval""" ,lowercase )
trainer.save_metrics("""eval""" ,lowercase )
# Write model card and (optionally) push to hub
snake_case : int = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
if __name__ == "__main__":
main()
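# Example invocation of this script (hypothetical output path; mirrors the standard
# transformers example-CLI pattern):
#
#   python run_image_classification.py \
#       --dataset_name beans --output_dir ./vit-beans \
#       --do_train --do_eval --per_device_train_batch_size 8 --num_train_epochs 3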
| 706 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
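# Worked examples for the conversion above (the binary digits are re-read as a
# base-10 int, matching the original TheAlgorithms implementation):
#
#   "AC"    -> int 172  -> returns 10101100
#   "-0x10" -> int -16  -> returns -10000
#
# Note: a zero input ("0") skips the while-loop, leaving bin_str empty, so
# int("") raises ValueError -- callers should special-case zero if needed.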
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase : Union[str, Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : List[str] = logging.getLogger()
def SCREAMING_SNAKE_CASE__ ( ) -> str:
snake_case : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case : Optional[int] = parser.parse_args()
return args.f
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase="eval" ) -> int:
snake_case : Union[str, Any] = os.path.join(lowercase ,f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase ,"""r""" ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
lowerCamelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
snake_case : int = self.get_auto_remove_tmp_dir()
snake_case : int = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A , """argv""" , A ):
run_flax_glue.main()
snake_case : Union[str, Any] = get_results(A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Any = self.get_auto_remove_tmp_dir()
snake_case : List[str] = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A , """argv""" , A ):
run_clm_flax.main()
snake_case : Optional[int] = get_results(A )
self.assertLess(result["""eval_perplexity"""] , 1_0_0 )
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case : Optional[int] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A , """argv""" , A ):
run_summarization_flax.main()
snake_case : Optional[Any] = get_results(A , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 1_0 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def UpperCAmelCase ( self ) -> List[str]:
snake_case : List[Any] = self.get_auto_remove_tmp_dir()
snake_case : str = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A , """argv""" , A ):
run_mlm_flax.main()
snake_case : Optional[int] = get_results(A )
self.assertLess(result["""eval_perplexity"""] , 4_2 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Dict = self.get_auto_remove_tmp_dir()
snake_case : Optional[Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A , """argv""" , A ):
run_ta_mlm_flax.main()
snake_case : Tuple = get_results(A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
def UpperCAmelCase ( self ) -> Any:
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
snake_case : Tuple = 7 if get_gpu_count() > 1 else 2
snake_case : str = self.get_auto_remove_tmp_dir()
snake_case : int = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A , """argv""" , A ):
run_flax_ner.main()
snake_case : Any = get_results(A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : List[str] = self.get_auto_remove_tmp_dir()
snake_case : Optional[int] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A , """argv""" , A ):
run_qa.main()
snake_case : Dict = get_results(A )
self.assertGreaterEqual(result["""eval_f1"""] , 3_0 )
self.assertGreaterEqual(result["""eval_exact"""] , 3_0 )
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
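# Hedged usage sketch (the class mirrors a standard transformers image processor;
# with the defaults above it resizes to 256x256, center-crops to 224x224, rescales
# by 1/255 and normalizes with the ImageNet mean/std):
#
#   processor = ImageProcessor()                      # hypothetical de-obfuscated name
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape                       # -> torch.Size([1, 3, 224, 224])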
| 684 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = None ,lowercase = None ) -> None:
if start is None:
snake_case : List[str] = 0
if end is None:
snake_case : Any = len(lowercase ) - 1
if start >= end:
return
snake_case : Union[str, Any] = (start + end) // 2
slowsort(lowercase ,lowercase ,lowercase )
slowsort(lowercase ,mid + 1 ,lowercase )
if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]  # compare-and-exchange: keep the larger element at `end`
slowsort(lowercase ,lowercase ,end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
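    # A quick sanity check of the in-place sort (slowsort sorts by recursive
    # compare-and-exchange; the list is mutated, nothing is returned):
    #
    #   data = [5, 2, 9, 1, 7]
    #   slowsort(data)
    #   assert data == [1, 2, 5, 7, 9]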
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCamelCase : Any = logging.getLogger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """masked_bert"""
def __init__( self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1e-1_2 , A=0 , A="topK" , A="constant" , A=0.0 , **A , ) -> Union[str, Any]:
super().__init__(pad_token_id=A , **A )
snake_case : Union[str, Any] = vocab_size
snake_case : int = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Union[str, Any] = hidden_act
snake_case : Union[str, Any] = intermediate_size
snake_case : str = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : Optional[Any] = type_vocab_size
snake_case : Tuple = initializer_range
snake_case : List[Any] = layer_norm_eps
snake_case : Optional[Any] = pruning_method
snake_case : Any = mask_init
snake_case : List[Any] = mask_scale
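# Sketch: the pruning fields are what distinguish this config from a vanilla BERT
# one (names follow the original MaskedBertConfig from the movement-pruning example):
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   assert config.model_type == "masked_bert"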
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
| 684 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=False ) -> Union[str, Any]:
try:
snake_case : Any = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
snake_case : Any = default
else:
# KEY is set, convert it to True or False.
try:
snake_case : List[Any] = strtobool(lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
lowerCamelCase : List[str] = parse_flag_from_env('RUN_SLOW', default=False)
lowerCamelCase : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False)
lowerCamelCase : str = parse_flag_from_env('RUN_LOCAL', default=True)
lowerCamelCase : int = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
lowerCamelCase : List[str] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
lowerCamelCase : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
lowerCamelCase : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
lowerCamelCase : List[str] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
lowerCamelCase : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
lowerCamelCase : Tuple = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
lowerCamelCase : List[Any] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
try:
import faiss # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires faiss""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
try:
import regex # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires regex""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
try:
import elasticsearch # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires elasticsearch""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
try:
import sqlalchemy # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires sqlalchemy""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
if not config.TORCH_AVAILABLE:
snake_case : Optional[Any] = unittest.skip("""test requires PyTorch""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
if not config.TF_AVAILABLE:
snake_case : Union[str, Any] = unittest.skip("""test requires TensorFlow""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
if not config.JAX_AVAILABLE:
snake_case : List[str] = unittest.skip("""test requires JAX""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
if not config.PIL_AVAILABLE:
snake_case : List[Any] = unittest.skip("""test requires Pillow""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
def _require_spacy_model(lowercase ):
try:
import spacy # noqa F401
spacy.load(lowercase )
except ImportError:
return unittest.skip("""test requires spacy""" )(lowercase )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(lowercase ) )(lowercase )
else:
return test_case
return _require_spacy_model
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
if not _run_slow_tests or _run_slow_tests == 0:
snake_case : Optional[int] = unittest.skip("""test is slow""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
if not _run_local_tests or _run_local_tests == 0:
snake_case : Tuple = unittest.skip("""test is local""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
if not _run_packaged_tests or _run_packaged_tests == 0:
snake_case : Tuple = unittest.skip("""test is packaged""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
if not _run_remote_tests or _run_remote_tests == 0:
snake_case : Dict = unittest.skip("""test requires remote""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( *lowercase ) -> int:
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(lowercase ) and name.startswith("""test""" ):
for decorator in decorators:
snake_case : Any = decorator(lowercase )
setattr(cls ,lowercase ,lowercase )
return cls
return decorate
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
_snake_case = 2
@contextmanager
def SCREAMING_SNAKE_CASE__ ( lowercase=OfflineSimulationMode.CONNECTION_FAILS ,lowercase=1E-16 ) -> List[Any]:
snake_case : Any = requests.Session().request
def timeout_request(lowercase ,lowercase ,lowercase ,**lowercase ):
# Change the url to an invalid url so that the connection hangs
snake_case : List[Any] = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
snake_case : Any = timeout
try:
return online_request(lowercase ,lowercase ,**lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
snake_case : Dict = url
snake_case : Optional[int] = e.args[0]
snake_case : List[Any] = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"""OfflineMock[{url}]""" ),)
snake_case : Dict = (max_retry_error,)
raise
def raise_connection_error(lowercase ,lowercase ,**lowercase ):
raise requests.ConnectionError("""Offline mode is enabled.""" ,request=lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" ,lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" ,lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,lowercase ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Optional[Any]:
snake_case : str = str(Path().resolve() )
with tempfile.TemporaryDirectory(*lowercase ,**lowercase ) as tmp_dir:
try:
os.chdir(lowercase )
yield
finally:
os.chdir(lowercase )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
import gc
gc.collect()
snake_case : Tuple = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
import gc
gc.collect()
snake_case : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[Any]:
return deepcopy(lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(lowercase ).integers(0 ,100 ,10 ).tolist()
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
import decorator
from requests.exceptions import HTTPError
def _wrapper(lowercase ,*lowercase ,**lowercase ):
try:
return func(*lowercase ,**lowercase )
except HTTPError as err:
if str(lowercase ).startswith("""500""" ) or str(lowercase ).startswith("""502""" ):
pytest.xfail(str(lowercase ) )
raise err
return decorator.decorator(_wrapper ,lowercase )
class __lowercase :
"""simple docstring"""
def __init__( self , A , A , A ) -> Optional[int]:
snake_case : List[Any] = returncode
snake_case : Tuple = stdout
snake_case : Dict = stderr
async def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[Any]:
while True:
snake_case : List[str] = await stream.readline()
if line:
callback(lowercase )
else:
break
async def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ,lowercase=None ,lowercase=None ,lowercase=False ,lowercase=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ ,""" """.join(lowercase ) )
snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
snake_case : str = []
snake_case : List[str] = []
def tee(lowercase ,lowercase ,lowercase ,lowercase="" ):
snake_case : List[str] = line.decode("""utf-8""" ).rstrip()
sink.append(lowercase )
if not quiet:
print(lowercase ,lowercase ,file=lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda lowercase : tee(lowercase ,lowercase ,sys.stdout ,label="""stdout:""" ) ),
_read_stream(p.stderr ,lambda lowercase : tee(lowercase ,lowercase ,sys.stderr ,label="""stderr:""" ) ),
] ,timeout=lowercase ,)
return _RunOutput(await p.wait() ,lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ,lowercase=None ,lowercase=180 ,lowercase=False ,lowercase=True ) -> _RunOutput:
snake_case : Dict = asyncio.get_event_loop()
snake_case : Optional[int] = loop.run_until_complete(
_stream_subprocess(lowercase ,env=lowercase ,stdin=lowercase ,timeout=lowercase ,quiet=lowercase ,echo=lowercase ) )
snake_case : Any = """ """.join(lowercase )
if result.returncode > 0:
snake_case : List[str] = """\n""".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
snake_case : Optional[Any] = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" )
snake_case : Tuple = re.sub(R"""^gw""" ,"""""" ,lowercase ,0 ,re.M )
return int(lowercase )
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
snake_case : Dict = 29500
snake_case : int = pytest_xdist_worker_id()
return port + uniq_delta
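# Hedged sketch tying the two helpers above together (original names in the
# transformers/datasets test utilities: get_torch_dist_unique_port and
# execute_subprocess_async; the script path is hypothetical):
#
#   port = get_torch_dist_unique_port()
#   cmd = f"python -m torch.distributed.run --nproc_per_node=2 --master_port={port} test_script.py".split()
#   result = execute_subprocess_async(cmd, env=os.environ.copy())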
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
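# With the defaults above, the spectrogram is unfolded into
# ((num_mel_bins - patch_size) // frequency_stride + 1) x ((max_length - patch_size) // time_stride + 1)
# patches, i.e. (128 - 16) // 10 + 1 = 12 frequency patches and
# (1024 - 16) // 10 + 1 = 101 time patches -> 1212 patches per example
# (plus the CLS/distillation tokens in the original AST model).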
| 684 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def SCREAMING_SNAKE_CASE__ ( lowercase = "" ) -> dict[str, float]:
snake_case : Any = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
snake_case : Any = BeautifulSoup(requests.get(lowercase ).text ,"""html.parser""" )
snake_case : Optional[Any] = soup.find_all("""td""" ,attrs="""titleColumn""" )
snake_case : List[str] = soup.find_all("""td""" ,class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase ,lowercase )
}
def SCREAMING_SNAKE_CASE__ ( lowercase = "IMDb_Top_250_Movies.csv" ) -> None:
snake_case : Dict = get_imdb_top_aaa_movies()
with open(lowercase ,"""w""" ,newline="""""" ) as out_file:
snake_case : Union[str, Any] = csv.writer(lowercase )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
            raise ValueError(
                f""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
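# Usage sketch for the three task heads defined above (task names are real
# pipeline aliases; inputs and keyword values illustrative):
#   from transformers import pipeline
#   pipeline("summarization")("long article ...", max_length=60, min_length=10)
#   pipeline("translation_en_to_fr")("How old are you?")
#   pipeline("text2text-generation")("question: ... context: ...")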
| 684 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Union[str, Any] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def SCREAMING_SNAKE_CASE__ ( lowercase = 100 ) -> int:
snake_case : Union[str, Any] = 1
snake_case : List[Any] = 2
for i in range(2 ,max_n + 1 ):
snake_case : Optional[int] = pre_numerator
snake_case : Dict = 2 * i // 3 if i % 3 == 0 else 1
snake_case : Dict = cur_numerator
snake_case : List[str] = e_cont * pre_numerator + temp
return sum_digits(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
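# Background sketch (assumption: this solves Project Euler 65): e has the
# continued fraction [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], i.e. every third
# partial denominator grows as 2, 4, 6, ... and the rest are 1, which is what
# the `i % 3 == 0` branch encodes. Convergent numerators follow
# n_i = a_i * n_(i-1) + n_(i-2); the 10th convergent is 1457/536, and
# sum_digits(1457) == 1 + 4 + 5 + 7 == 17.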
| 712 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
    return sha256(lowercase ).hexdigest()
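# For example (blank lines and comments are stripped before hashing, so
# comment-only edits do not invalidate dataset caches):
#   _hash_python_lines(["x = 1  # init", "", "y = 2"])
# hashes the string "x = 1  \ny = 2".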
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case : Optional[Any] = [1, 2, 3]
with pytest.raises(lowercase ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowercase ,lowercase ,num_proc=2 )
with pytest.raises(lowercase ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowercase ,lowercase ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" ,[2, -1] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : Any = [1, 2]
snake_case : List[Any] = {"""a""": 1, """b""": 2}
snake_case : Tuple = {"""a""": [1, 2], """b""": [3, 4]}
snake_case : Optional[Any] = {"""a""": {"""1""": 1}, """b""": 2}
snake_case : Union[str, Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
snake_case : int = [2, 3]
snake_case : Union[str, Any] = {"""a""": 2, """b""": 3}
snake_case : Union[str, Any] = {"""a""": [2, 3], """b""": [4, 5]}
snake_case : Optional[Any] = {"""a""": {"""1""": 2}, """b""": 3}
snake_case : List[Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
snake_case : Union[str, Any] = len(lowercase )
for i in range(1 ,lowercase ):
snake_case : Union[str, Any] = collection[i]
snake_case : List[Any] = 0
snake_case : Optional[int] = i - 1
while low <= high:
snake_case : Dict = (low + high) // 2
if val < collection[mid]:
snake_case : str = mid - 1
else:
snake_case : Any = mid + 1
for j in range(lowercase ,lowercase ,-1 ):
snake_case : Optional[int] = collection[j - 1]
snake_case : Any = val
return collection
if __name__ == "__main__":
lowerCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Dict = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
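# Worked micro-step (assumption: the binary search yields the index where the
# new value belongs): inserting 3 into the sorted prefix [2, 5] of [2, 5, 3]
# ends with low == 1, the shift loop moves 5 one slot right, and the list
# becomes [2, 3, 5]. The search costs O(log i) per element, but the shifting
# keeps the whole sort O(n^2) in the worst case.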
| 714 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A=None , A=None , *A , **A ) -> Optional[int]:
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
snake_case : Tuple = self.model.config
else:
snake_case : Union[str, Any] = config
snake_case : Tuple = data_args
snake_case : Any = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                """ padding.""" )
if self.args.label_smoothing == 0:
snake_case : List[str] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
snake_case : Dict = label_smoothed_nll_loss
def UpperCAmelCase ( self , A ) -> List[str]:
if self.optimizer is None:
snake_case : Union[str, Any] = ["""bias""", """LayerNorm.weight"""]
snake_case : Dict = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
snake_case : List[str] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
snake_case : List[Any] = Adafactor
snake_case : Optional[int] = {"""scale_parameter""": False, """relative_step""": False}
else:
snake_case : List[str] = AdamW
            snake_case : Any = {
                """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                """eps""": self.args.adam_epsilon,
            }
snake_case : str = self.args.learning_rate
if self.sharded_ddp:
snake_case : List[Any] = OSS(
params=A , optim=A , **A , )
else:
snake_case : List[Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
snake_case : Any = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def UpperCAmelCase ( self , A ) -> Any:
snake_case : str = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
snake_case : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
snake_case : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
snake_case : int = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def UpperCAmelCase ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCAmelCase ( self , A , A , A ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
snake_case : Any = model(**A , use_cache=A )[0]
snake_case : Any = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
snake_case : int = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
snake_case : Any = model(**A , use_cache=A )[0]
snake_case : Optional[int] = torch.nn.functional.log_softmax(A , dim=-1 )
snake_case : Union[str, Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCAmelCase ( self , A , A ) -> int:
snake_case : int = inputs.pop("""labels""" )
snake_case : Union[str, Any] = self._compute_loss(A , A , A )
return loss
def UpperCAmelCase ( self , A , A , A , A = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
snake_case : List[Any] = self._prepare_inputs(A )
snake_case : Tuple = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
snake_case : List[Any] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
snake_case : Any = self._pad_tensors_to_max_len(A , gen_kwargs["""max_length"""] )
snake_case : Optional[Any] = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
snake_case : List[Any] = self._compute_loss(A , A , A )
snake_case : int = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
snake_case : Dict = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
snake_case : Union[str, Any] = self._pad_tensors_to_max_len(A , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def UpperCAmelCase ( self , A , A ) -> Union[str, Any]:
# If PAD token is not defined at least EOS token has to be defined
snake_case : Tuple = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if the tensor has to be"""
                f""" padded to `max_length`={max_length}""" )
snake_case : List[str] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
snake_case : Tuple = tensor
return padded_tensor
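# Effect sketch for _pad_tensors_to_max_len (assumption: pad_token_id == 0):
# a (batch, 3) tensor of generated ids padded to max_length == 5 comes back
# as (batch, 5) with the last two columns all zeros, so variable-length
# generate() outputs can be stacked against the labels when computing metrics.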
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
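# Encryption sketch matching the key layout above (assumptions: the public key
# stores (key_size, g, e_2, p) with e_2 = (g**d)^-1 mod p, k is a fresh random
# ephemeral value per message, and the plaintext m satisfies m < p):
#   c_1 = pow(g, k, p)
#   c_2 = (m * pow(e_2, k, p)) % p   # m * g^(-d*k) mod p
# and decryption with the private d recovers
#   m = (c_2 * pow(c_1, d, p)) % p   # c_1**d == g^(d*k) mod p cancels the mask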
| 684 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = OpenAIGPTTokenizer
_snake_case = OpenAIGPTTokenizerFast
_snake_case = True
_snake_case = False
def UpperCAmelCase ( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
snake_case : Union[str, Any] = dict(zip(A , range(len(A ) ) ) )
snake_case : Tuple = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(A ) )
def UpperCAmelCase ( self , A ) -> List[str]:
return "lower newer", "lower newer"
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[int] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
snake_case : int = """lower"""
snake_case : List[str] = ["""low""", """er</w>"""]
snake_case : List[str] = tokenizer.tokenize(A )
self.assertListEqual(A , A )
snake_case : Optional[Any] = tokens + ["""<unk>"""]
snake_case : List[str] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def UpperCAmelCase ( self , A=1_5 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : str = self.rust_tokenizer_class.from_pretrained(A , **A )
# Simple input
snake_case : Dict = """This is a simple input"""
snake_case : str = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case : Any = ("""This is a simple input""", """This is a pair""")
snake_case : Optional[int] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding="""max_length""" )
# Simple input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding="""max_length""" )
# Simple input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding="""max_length""" , )
# Pair input
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding="""max_length""" )
# Pair input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding="""max_length""" )
# Pair input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding="""max_length""" , )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
| 716 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
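# A self-contained iterative version of the same square-and-multiply idea,
# checked against Python's built-in three-argument pow (a sketch; the name is
# hypothetical, and the recursive helper above is functionally equivalent):
def _iterative_modexpt(base: int, exponent: int, modulo_value: int) -> int:
    result = 1
    base %= modulo_value
    while exponent:
        if exponent & 1:  # odd bit of the exponent: fold the base in
            result = (result * base) % modulo_value
        base = (base * base) % modulo_value  # square for the next bit
        exponent >>= 1
    return result

assert _iterative_modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)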
| 684 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase : Union[str, Any] = 1_6
lowerCamelCase : Tuple = 3_2
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = 16 ,lowercase = "bert-base-cased" ) -> int:
snake_case : Optional[int] = AutoTokenizer.from_pretrained(lowercase )
snake_case : Optional[Any] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
snake_case : Any = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case : Optional[int] = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case : Union[str, Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase ,padding="""max_length""" ,max_length=128 ,return_tensors="""pt""" )
return tokenizer.pad(lowercase ,padding="""longest""" ,return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case : str = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
snake_case : Union[str, Any] = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Optional[int]:
model.eval()
snake_case : Union[str, Any] = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case : int = model(**lowercase )
snake_case : List[str] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case : Any = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
snake_case : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
snake_case : List[str] = metric.compute()
return eval_metric["accuracy"]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Any:
# Initialize accelerator
snake_case : Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case : Optional[Any] = config["""lr"""]
snake_case : Union[str, Any] = int(config["""num_epochs"""] )
snake_case : Dict = int(config["""seed"""] )
snake_case : Union[str, Any] = int(config["""batch_size"""] )
snake_case : Tuple = args.model_name_or_path
set_seed(lowercase )
snake_case : Optional[Any] = get_dataloaders(lowercase ,lowercase ,lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(lowercase ,return_dict=lowercase )
# Instantiate optimizer
snake_case : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case : List[str] = optimizer_cls(params=model.parameters() ,lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
snake_case : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
snake_case : List[Any] = 1
snake_case : Tuple = (len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case : Any = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=0 ,num_training_steps=lowercase ,)
else:
snake_case : Optional[int] = DummyScheduler(lowercase ,total_num_steps=lowercase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case : List[Any] = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# We need to keep track of how many total steps we have iterated over
snake_case : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case : Union[str, Any] = 0
snake_case : Union[str, Any] = evaluate.load("""glue""" ,"""mrpc""" )
snake_case : Any = num_epochs
if args.partial_train_epoch is not None:
snake_case : str = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case : str = args.resume_from_checkpoint.split("""epoch_""" )[1]
snake_case : int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case : List[str] = int(lowercase ) + 1
snake_case : Union[str, Any] = evaluation_loop(lowercase ,lowercase ,lowercase ,lowercase )
accelerator.print("""resumed checkpoint performance:""" ,lowercase )
accelerator.print("""resumed checkpoint's scheduler's lr:""" ,lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" ,optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir ,f"""state_{starting_epoch-1}.json""" ) ,"""r""" ) as f:
snake_case : Any = json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case : List[Any] = {}
for epoch in range(lowercase ,lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
snake_case : str = model(**lowercase )
snake_case : str = outputs.loss
snake_case : Dict = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case : Optional[int] = f"""epoch_{epoch}"""
snake_case : Optional[int] = os.path.join(args.output_dir ,lowercase )
accelerator.save_state(lowercase )
snake_case : Dict = evaluation_loop(lowercase ,lowercase ,lowercase ,lowercase )
snake_case : str = accuracy
snake_case : Optional[Any] = lr_scheduler.get_lr()[0]
snake_case : List[str] = optimizer.param_groups[0]["""lr"""]
snake_case : int = epoch
snake_case : int = overall_step
accelerator.print(f"""epoch {epoch}:""" ,lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,f"""state_{epoch}.json""" ) ,"""w""" ) as f:
json.dump(lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
    snake_case : Optional[int] = argparse.ArgumentParser(description="""Simple example of a training script that checkpoints and resumes training state.""" )
parser.add_argument(
"""--model_name_or_path""" ,type=lowercase ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=lowercase ,)
parser.add_argument(
"""--output_dir""" ,type=lowercase ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
parser.add_argument(
"""--resume_from_checkpoint""" ,type=lowercase ,default=lowercase ,help="""If the training should continue from a checkpoint folder.""" ,)
parser.add_argument(
"""--partial_train_epoch""" ,type=lowercase ,default=lowercase ,help="""If passed, the training will stop after this number of epochs.""" ,)
parser.add_argument(
"""--num_epochs""" ,type=lowercase ,default=2 ,help="""Number of train epochs.""" ,)
snake_case : Optional[Any] = parser.parse_args()
snake_case : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
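# Invocation sketch (script name and paths illustrative):
#   accelerate launch checkpointing.py --output_dir ckpts --num_epochs 2
#   accelerate launch checkpointing.py --output_dir ckpts \
#       --resume_from_checkpoint ckpts/epoch_0
# The resume branch replays evaluation for the saved epoch, asserts the
# metrics match what was recorded in state_0.json, then returns.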
| 717 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
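# Sanity-check sketch for the frequency table: two fair 4-sided dice give
# 4**2 == 16 equally likely outcomes, so total_frequency_distribution(
# sides_number=4, dice_number=2) should report 1 way to roll a total of 2
# (1+1) and 4 ways to roll 5 (1+4, 2+3, 3+2, 4+1); dividing counts by 16
# turns them into probabilities, as done above with 4**9 * 6**6.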
| 684 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Any = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """dpt"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=3_8_4 , A=1_6 , A=3 , A=False , A=True , A=[2, 5, 8, 1_1] , A="project" , A=[4, 2, 1, 0.5] , A=[9_6, 1_9_2, 3_8_4, 7_6_8] , A=2_5_6 , A=-1 , A=False , A=True , A=0.4 , A=2_5_5 , A=0.1 , A=[1, 1_0_2_4, 2_4, 2_4] , A=[0, 1] , A=None , **A , ) -> Tuple:
super().__init__(**A )
snake_case : List[Any] = hidden_size
snake_case : List[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
snake_case : Optional[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
snake_case : Optional[Any] = BitConfig(**A )
elif isinstance(A , A ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
snake_case : Tuple = BitConfig(**A )
elif isinstance(A , A ):
snake_case : Dict = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
snake_case : Union[str, Any] = backbone_featmap_shape
snake_case : str = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
snake_case : Union[str, Any] = None
snake_case : Tuple = None
snake_case : List[Any] = []
snake_case : Tuple = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : Optional[Any] = hidden_act
snake_case : Optional[Any] = hidden_dropout_prob
snake_case : int = attention_probs_dropout_prob
snake_case : Tuple = initializer_range
snake_case : int = layer_norm_eps
snake_case : Tuple = image_size
snake_case : int = patch_size
snake_case : Union[str, Any] = num_channels
snake_case : Tuple = qkv_bias
snake_case : List[str] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
snake_case : Any = readout_type
snake_case : int = reassemble_factors
snake_case : Union[str, Any] = neck_hidden_sizes
snake_case : Optional[int] = fusion_hidden_size
snake_case : Optional[int] = head_in_index
snake_case : Tuple = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case : int = use_auxiliary_head
snake_case : Tuple = auxiliary_loss_weight
snake_case : str = semantic_loss_ignore_index
snake_case : Tuple = semantic_classifier_dropout
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Any = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case : Optional[Any] = self.backbone_config.to_dict()
snake_case : Tuple = self.__class__.model_type
return output | 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
from __future__ import annotations
import time
lowerCamelCase : int = list[tuple[int, int]]
lowerCamelCase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase : str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowercase :
"""simple docstring"""
def __init__( self , A , A , A , A , A ) -> str:
snake_case : int = pos_x
snake_case : Union[str, Any] = pos_y
snake_case : Optional[Any] = (pos_y, pos_x)
snake_case : Optional[int] = goal_x
snake_case : List[Any] = goal_y
snake_case : str = parent
class __lowercase :
"""simple docstring"""
def __init__( self , A , A ) -> int:
snake_case : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , A )
snake_case : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
snake_case : Dict = [self.start]
snake_case : Optional[Any] = False
def UpperCAmelCase ( self ) -> Path | None:
while self.node_queue:
snake_case : Optional[int] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case : Optional[int] = True
return self.retrace_path(A )
snake_case : List[str] = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase ( self , A ) -> list[Node]:
snake_case : Tuple = []
for action in delta:
snake_case : List[str] = parent.pos_x + action[1]
snake_case : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase ( self , A ) -> Path:
snake_case : Optional[int] = node
snake_case : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case : Dict = current_node.parent
path.reverse()
return path
class __lowercase :
"""simple docstring"""
def __init__( self , A , A ) -> Union[str, Any]:
snake_case : Union[str, Any] = BreadthFirstSearch(A , A )
snake_case : List[str] = BreadthFirstSearch(A , A )
snake_case : Optional[int] = False
def UpperCAmelCase ( self ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case : List[Any] = self.fwd_bfs.node_queue.pop(0 )
snake_case : List[Any] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case : List[str] = True
return self.retrace_bidirectional_path(
A , A )
snake_case : str = current_bwd_node
snake_case : List[Any] = current_fwd_node
snake_case : Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase ( self , A , A ) -> Path:
snake_case : List[str] = self.fwd_bfs.retrace_path(A )
snake_case : Optional[Any] = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
snake_case : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase : List[Any] = (0, 0)
lowerCamelCase : str = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase : Union[str, Any] = time.time()
lowerCamelCase : Any = BreadthFirstSearch(init, goal)
lowerCamelCase : List[Any] = bfs.search()
lowerCamelCase : List[Any] = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
lowerCamelCase : Tuple = time.time()
lowerCamelCase : List[Any] = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase : Dict = bd_bfs.search()
lowerCamelCase : List[str] = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 719 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
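# Worked example (assumption: inputs carry an optional sign but no "0x"
# prefix): "AC" -> int("AC", 16) == 172, the %2 / >>= 1 loop emits the bits
# 10101100, and the function returns the integer 10101100; "-ac" likewise
# yields -10101100.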
| 720 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
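# Why the extra backward pass helps (a sketch): plain bubble sort moves a
# small element ("turtle") left only one slot per pass, so [2, 3, 4, 1] needs
# three passes; the backward sweep above drags the 1 to the front during the
# first shake, and the next shake sees no swaps and stops early.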
| 684 | 0 |
import heapq
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> set[int]:
snake_case : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase ,[-1 * len(lowercase ), (key, value)] )
# chosen_vertices = set of chosen vertices
snake_case : List[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
snake_case : Union[str, Any] = heapq.heappop(lowercase )[1][0]
chosen_vertices.add(lowercase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
snake_case : Any = elem[1][1].index(lowercase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Dict = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
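# Trace sketch on the example graph (assumption: heapq breaks rank ties by
# comparing the (key, adjacency) tuples): vertices 2 and 3 both start at rank
# 3 and 2 wins the tie; removing its edges demotes 3, so the greedy order is
# 2, 0, 1, 4 and the printed cover is {0, 1, 2, 4}, an approximation rather
# than a guaranteed minimum cover.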
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=5_1_2 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ) -> None:
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding="""utf-8""" ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding="""utf-8""" ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding="""utf-8""" ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2 the character vocab had n_vocab=80; v3 dropped the "+", so n_vocab=79.
        if len(self.lyrics_encoder ) == 7_9:
            oov = oov.replace(r"""\-'""" , r"""\-+'""" )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def UpperCAmelCase ( self ) -> str:
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ) -> Optional[Any]:
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self , lyrics ) -> List[str]:
        return list(lyrics )
    def tokenize( self , artist , genre , lyrics , **kwargs ) -> List[str]:
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists , genres , lyrics , is_split_into_words = False ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + """.v2"""
                genres[idx] = [
                    self._normalize(genre ) + """.v2""" for genre in genres[idx].split("""_""" )
                ]  # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
    def _run_strip_accents( self , text ) -> List[Any]:
        text = unicodedata.normalize("""NFD""" , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self , text ) -> str:
        accepted = (
            [chr(i ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
            + ["""."""]
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r"""_+""" )
        text = """""".join([c if c in accepted else """_""" for c in text.lower()] )
        text = pattern.sub("""_""" , text ).strip("""_""" )
        return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
    def convert_to_tensors( self , inputs , tensor_type = None , prepend_batch_axis = False ) -> List[Any]:
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
        return inputs
    def __call__( self , artist , genres , lyrics="" , return_tensors="pt" ) -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
    def UpperCAmelCase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
        with open(artists_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
        with open(genres_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
        with open(lyrics_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def UpperCAmelCase ( self , artists_index , genres_index , lyric_index ) -> List[Any]:
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
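# Minimal usage sketch for the tokenizer class above, kept as comments because the
# three vocab JSON paths are hypothetical and the class name is the one used in this file:
# tokenizer = __lowercase("artists.json", "genres.json", "lyrics.json")
# batch = tokenizer("Alan Jackson", "Country Rock", "old town road", return_tensors="pt")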
| 684 | 0 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Kadane's recurrence: extend the running sum, restart at num, or keep the best so far
        ans = max(ans, ans + num, num)
    return ans
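# Hedged examples of the recurrence above (Kadane's algorithm):
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1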
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 700 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
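    # The search above prints [4, 10, 18]; the naive scan is O(len(s) * len(pattern)).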
| 684 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""input_ids""", """attention_mask"""]
    def __init__( self , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=1_2_5 , additional_special_tokens=None , **kwargs , ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("""extra_id""" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
                    """ extra_ids tokens""" )
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def _add_eos_if_not_present( self , token_ids ) -> List[int]:
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                """ eos tokens being added.""" )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def UpperCAmelCase ( self , text ) -> List[str]:
        tokens = [chr(i ) for i in text.encode("""utf-8""" )]
        return tokens
    def UpperCAmelCase ( self , token ) -> Dict:
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def UpperCAmelCase ( self , index ) -> Optional[Any]:
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def UpperCAmelCase ( self , tokens ) -> Tuple:
        bstring = b""""""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("""utf-8""" )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("""utf-8""" )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("""utf-8""" )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("""utf-8""" )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode("""utf-8""" , errors="""ignore""" )
        return string
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
return ()
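# Byte-level round-trip sketch (hedged): tokens are raw UTF-8 bytes shifted past the
# pad/eos/unk ids, so with the defaults vocab_size = 256 + 3 + 125 = 384 and, e.g.,
# converting the single-character token "h" yields ord("h") + 3.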
| 701 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Optional[int] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 703 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
webbrowser.open(link)
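    # Note (hedged): the AttributeError fallback above covers Google's alternate
    # ('kCrYT') result markup, whose hrefs are relative '/url?q=...' links that the
    # script unpacks via parse_qs before opening the first result in the browser.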
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
    def UpperCAmelCase ( self ) -> Optional[int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> List[str]:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> Tuple:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def UpperCAmelCase ( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
    def UpperCAmelCase ( self , index ) -> int:
        token = self.sp_model.IdToPiece(index )
        return token
    def UpperCAmelCase ( self , tokens ) -> Tuple:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def UpperCAmelCase ( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + suffix_ones
        return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def UpperCAmelCase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
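# Usage sketch (hedged; needs a real SentencePiece character model on disk):
# tok = __lowercase("spm_char.model")  # the tokenizer class defined above
# pieces = tok._tokenize("hello")      # character pieces from sp_model.encode(..., out_type=str)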
| 684 | 0 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # the seven recursive Strassen products
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
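# Hedged note: seven recursive products (t1..t7) instead of eight is what gives
# Strassen its O(n**log2(7)) ~ O(n**2.81) cost; the zero padding above reduces every
# input to the square power-of-two case that actual_strassen expects.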
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 684 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """openai-gpt"""
_snake_case = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=4_0_4_7_8 , n_positions=5_1_2 , n_embd=7_6_8 , n_layer=1_2 , n_head=1_2 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 706 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
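# Quick 2x2 spot check of the counterclockwise rotation (hedged example):
assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]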
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase : Optional[Any] = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = """k-diffusion"""
                    elif backend == "invisible_watermark":
                        backend = """invisible-watermark"""
                    assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Dict = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
    def UpperCAmelCase ( self ) -> Optional[Any]:
        self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

        class DummyModel(BertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_tf
    def UpperCAmelCase ( self ) -> str:
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

        class DummyModel(TFBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_flax
    def UpperCAmelCase ( self ) -> Any:
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )

        class DummyModel(FlaxBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , [] )
| 684 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
    def __init__( self , replacement: str = "▁" , add_prefix_space: bool = True , unk_token = "<unk>" , eos_token = "</s>" , pad_token = "<pad>" , ) -> None:
        self.special_tokens = {
            """pad""": {"""id""": 0, """token""": pad_token},
            """eos""": {"""id""": 1, """token""": eos_token},
            """unk""": {"""id""": 2, """token""": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["""id"""]] = token_dict["""token"""]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
        parameters = {
            """model""": """SentencePieceUnigram""",
            """replacement""": replacement,
            """add_prefix_space""": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self , files , vocab_size = 8_0_0_0 , show_progress = True , ) -> None:
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self , iterator , vocab_size = 8_0_0_0 , show_progress = True , ) -> None:
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self ) -> None:
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["""model"""]["""unk_id"""] = self.special_tokens["""unk"""]["""id"""]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , patch_size=1_6 , qkv_bias=True , frequency_stride=1_0 , time_stride=1_0 , max_length=1_0_2_4 , num_mel_bins=1_2_8 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
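        # Rough patch count with these defaults (hedged arithmetic): frequency dim
        # (128 - 16) // 10 + 1 = 12, time dim (1024 - 16) // 10 + 1 = 101, so the
        # encoder sees about 12 * 101 = 1212 patches plus the special tokens.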
| 684 | 0 |
from __future__ import annotations
arr = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
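# The three implementations agree; a hedged spot check:
assert (
    next_greatest_element_slow([-10, -5, 0, 5])
    == next_greatest_element_fast([-10, -5, 0, 5])
    == next_greatest_element([-10, -5, 0, 5])
    == [-5, 0, 5, -1]
)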
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCamelCase : List[str] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def UpperCAmelCase ( self , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ) -> Optional[int]:
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
                f""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
            and all(isinstance(el , A ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
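# Hedged usage sketch (added). The checkpoint name below is illustrative and
# requires network access to the Hugging Face Hub; it is not part of this file:
#
#     from transformers import pipeline
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     print(translator("How old are you?", max_length=40))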
| 684 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 712 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
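# Illustration (added): comments and blank lines are stripped before hashing,
# so inserting a standalone comment line into a module's source does not change
# its cache hash, while any code change does.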
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
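# Illustration (added): with the tables above, a file named "data.jsonl"
# resolves to the "json" module via _EXTENSION_TO_MODULE[".jsonl"], and
# _MODULE_TO_EXTENSIONS["json"] lists every extension the json module accepts.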
| 684 | 0 |
import re
import string
import numpy as np
import datasets
lowerCamelCase : Any = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
lowerCamelCase : Optional[Any] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
lowerCamelCase : List[Any] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def UpperCAmelCase ( self , A , A , A=None , A=False , A=False , A=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
                snake_case : Optional[int] = np.array([re.sub(s , """""" , x ) for x in predictions] )
                snake_case : Optional[int] = np.array([re.sub(s , """""" , x ) for x in references] )
else:
snake_case : List[Any] = np.asarray(A )
snake_case : List[Any] = np.asarray(A )
if ignore_case:
snake_case : List[Any] = np.char.lower(A )
snake_case : List[str] = np.char.lower(A )
if ignore_punctuation:
snake_case : List[str] = string.punctuation.maketrans("""""" , """""" , string.punctuation )
snake_case : Dict = np.char.translate(A , table=A )
snake_case : Optional[int] = np.char.translate(A , table=A )
if ignore_numbers:
snake_case : Dict = string.digits.maketrans("""""" , """""" , string.digits )
snake_case : List[str] = np.char.translate(A , table=A )
snake_case : Tuple = np.char.translate(A , table=A )
snake_case : Dict = predictions == references
return {"exact_match": np.mean(A ) * 1_0_0}
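# Minimal usage sketch (added; assumes the `datasets` library is installed):
#
#     metric = datasets.load_metric("exact_match")
#     print(metric.compute(references=["the cat"], predictions=["the cat"]))
#     # expected: {'exact_match': 100.0}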
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
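    # Example invocation (added; all paths below are placeholders):
    #
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path /path/to/tf_checkpoint \
    #     --rembert_config_file /path/to/rembert_config.json \
    #     --pytorch_dump_path /path/to/pytorch_model.bin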
| 684 | 0 |
lowerCamelCase : List[Any] = 2_5_6
# Modulus to hash a string
lowerCamelCase : Tuple = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> bool:
snake_case : Optional[int] = len(lowercase )
snake_case : Optional[Any] = len(lowercase )
if p_len > t_len:
return False
snake_case : List[str] = 0
snake_case : Optional[int] = 0
snake_case : List[str] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase ):
snake_case : Any = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case : str = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case : Optional[int] = (modulus_power * alphabet_size) % modulus
for i in range(0 ,t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case : Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
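# Complexity note (added): with a well-chosen modulus the rolling hash gives an
# average-case O(p_len + t_len) search; the worst case degrades toward
# O(p_len * t_len) when hash collisions force repeated direct comparisons.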
def SCREAMING_SNAKE_CASE__ ( ) -> None:
snake_case : int = """abc1abc12"""
snake_case : Tuple = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
snake_case : Dict = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase ,lowercase ) and not rabin_karp(lowercase ,lowercase )
# Test 2)
snake_case : int = """ABABX"""
snake_case : Tuple = """ABABZABABYABABX"""
assert rabin_karp(lowercase ,lowercase )
# Test 3)
snake_case : Optional[int] = """AAAB"""
snake_case : Union[str, Any] = """ABAAAAAB"""
assert rabin_karp(lowercase ,lowercase )
# Test 4)
snake_case : Union[str, Any] = """abcdabcy"""
snake_case : Optional[int] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase ,lowercase )
# Test 5)
snake_case : Tuple = """Lü"""
snake_case : Tuple = """Lüsai"""
assert rabin_karp(lowercase ,lowercase )
snake_case : Optional[Any] = """Lue"""
assert not rabin_karp(lowercase ,lowercase )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 714 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
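# Note (added): every class in this module follows the same dummy-object
# pattern -- constructing one of these placeholders, or calling its class
# methods, raises an error via requires_backends asking for the missing
# `flax` dependency.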
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
snake_case : Union[str, Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=lowercase ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=lowercase ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=lowercase )
return parser.parse_args()
def SCREAMING_SNAKE_CASE__ ( ) -> int:
snake_case : Union[str, Any] = parse_args()
# Import training_script as a module.
snake_case : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case : Union[str, Any] = script_fpath.stem
snake_case : Optional[Any] = importlib.import_module(lowercase )
# Patch sys.argv
snake_case : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
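    # Example launch (added; the training script and its flags are illustrative):
    #
    #   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased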
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
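# Added note: the public key is the 4-tuple (key_size, primitive root,
# modular inverse of root**d mod p, prime p) and the private key is
# (key_size, d); make_key_files below serializes both tuples as
# comma-separated integers.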
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowerCamelCase : str = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class __lowercase :
"""simple docstring"""
def __init__( self , A = 1_4 ) -> None:
if group not in primes:
raise ValueError("""Unsupported Group""" )
snake_case : Dict = primes[group]["""prime"""]
snake_case : str = primes[group]["""generator"""]
snake_case : Optional[Any] = int(hexlify(urandom(3_2 ) ) , base=1_6 )
def UpperCAmelCase ( self ) -> str:
return hex(self.__private_key )[2:]
def UpperCAmelCase ( self ) -> str:
snake_case : Dict = pow(self.generator , self.__private_key , self.prime )
return hex(A )[2:]
def UpperCAmelCase ( self , A ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(A , (self.prime - 1) // 2 , self.prime ) == 1
)
def UpperCAmelCase ( self , A ) -> str:
snake_case : Tuple = int(A , base=1_6 )
if not self.is_valid_public_key(A ):
raise ValueError("""Invalid public key""" )
snake_case : Optional[int] = pow(A , self.__private_key , self.prime )
return shaaaa(str(A ).encode() ).hexdigest()
@staticmethod
def UpperCAmelCase ( A , A ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(A , (prime - 1) // 2 , A ) == 1
)
@staticmethod
def UpperCAmelCase ( A , A , A = 1_4 ) -> str:
snake_case : Tuple = int(A , base=1_6 )
snake_case : Optional[int] = int(A , base=1_6 )
snake_case : Tuple = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(A , A ):
raise ValueError("""Invalid public key""" )
snake_case : int = pow(A , A , A )
return shaaaa(str(A ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
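# Hedged usage sketch (added). The class and method names below follow the
# upstream version of this module (the internal reference to
# DiffieHellman.is_valid_public_key_static suggests the class name) and are
# assumptions, not guaranteed by this file:
#
#     alice = DiffieHellman(group=14)
#     bob = DiffieHellman(group=14)
#     shared_a = alice.generate_shared_key(bob.generate_public_key())
#     shared_b = bob.generate_shared_key(alice.generate_public_key())
#     assert shared_a == shared_b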
| 716 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
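# Added note: the loop above computes the hyperexponentiation (tetration)
# base^^height modulo 10**digits, i.e. the last `digits` decimal digits of
# 1777^(1777^(...)) with 1855 levels, using square-and-multiply via _modexpt.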
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Optional[Any] = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ['BeitFeatureExtractor']
lowerCamelCase : Tuple = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
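# Added note: this computes the probability that nine four-sided dice strictly
# outscore six six-sided dice, by comparing the two total-frequency
# distributions and rounding the result to seven decimal places.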
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Dict = tau * frequency / samplerate
snake_case : int = sin(lowercase )
snake_case : Optional[Any] = cos(lowercase )
snake_case : List[Any] = _sin / (2 * q_factor)
snake_case : Tuple = (1 - _cos) / 2
snake_case : int = 1 - _cos
snake_case : Union[str, Any] = 1 + alpha
snake_case : List[Any] = -2 * _cos
snake_case : int = 1 - alpha
snake_case : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
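# Added note: each factory in this module builds a standard biquad
# (second-order IIR) section from the Audio EQ Cookbook formulas:
# omega = tau * frequency / samplerate, alpha = sin(omega) / (2 * Q),
# then hands the feedback (a) and feedforward (b) coefficient lists to
# IIRFilter.set_coefficients.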
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Tuple = tau * frequency / samplerate
snake_case : Union[str, Any] = sin(lowercase )
snake_case : Tuple = cos(lowercase )
snake_case : List[str] = _sin / (2 * q_factor)
snake_case : List[Any] = (1 + _cos) / 2
snake_case : Any = -1 - _cos
snake_case : Union[str, Any] = 1 + alpha
snake_case : int = -2 * _cos
snake_case : str = 1 - alpha
snake_case : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Union[str, Any] = tau * frequency / samplerate
snake_case : Any = sin(lowercase )
snake_case : str = cos(lowercase )
snake_case : List[Any] = _sin / (2 * q_factor)
snake_case : List[str] = _sin / 2
snake_case : Optional[Any] = 0
snake_case : List[str] = -ba
snake_case : str = 1 + alpha
snake_case : List[Any] = -2 * _cos
snake_case : List[str] = 1 - alpha
snake_case : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
snake_case : Optional[Any] = tau * frequency / samplerate
snake_case : Any = sin(lowercase )
snake_case : Optional[Any] = cos(lowercase )
snake_case : Dict = _sin / (2 * q_factor)
snake_case : Tuple = 1 - alpha
snake_case : List[str] = -2 * _cos
snake_case : List[Any] = 1 + alpha
snake_case : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,) -> IIRFilter:
snake_case : int = tau * frequency / samplerate
snake_case : Optional[Any] = sin(lowercase )
snake_case : str = cos(lowercase )
snake_case : Optional[int] = _sin / (2 * q_factor)
snake_case : Dict = 10 ** (gain_db / 40)
snake_case : str = 1 + alpha * big_a
snake_case : Optional[Any] = -2 * _cos
snake_case : Tuple = 1 - alpha * big_a
snake_case : Dict = 1 + alpha / big_a
snake_case : int = -2 * _cos
snake_case : Any = 1 - alpha / big_a
snake_case : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,) -> IIRFilter:
snake_case : Any = tau * frequency / samplerate
snake_case : Optional[int] = sin(lowercase )
snake_case : Optional[Any] = cos(lowercase )
snake_case : Dict = _sin / (2 * q_factor)
snake_case : List[str] = 10 ** (gain_db / 40)
snake_case : Tuple = (big_a + 1) - (big_a - 1) * _cos
snake_case : Any = (big_a + 1) + (big_a - 1) * _cos
snake_case : List[Any] = (big_a - 1) - (big_a + 1) * _cos
snake_case : List[Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case : int = 2 * sqrt(lowercase ) * alpha
snake_case : List[Any] = big_a * (pmc + aaa)
snake_case : Dict = 2 * big_a * mpc
snake_case : List[Any] = big_a * (pmc - aaa)
snake_case : int = ppmc + aaa
snake_case : Dict = -2 * pmpc
snake_case : str = ppmc - aaa
snake_case : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,) -> IIRFilter:
snake_case : Union[str, Any] = tau * frequency / samplerate
snake_case : Tuple = sin(lowercase )
snake_case : Any = cos(lowercase )
snake_case : Tuple = _sin / (2 * q_factor)
snake_case : int = 10 ** (gain_db / 40)
snake_case : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
snake_case : Dict = (big_a + 1) + (big_a - 1) * _cos
snake_case : Union[str, Any] = (big_a - 1) - (big_a + 1) * _cos
snake_case : List[Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case : Dict = 2 * sqrt(lowercase ) * alpha
snake_case : List[Any] = big_a * (ppmc + aaa)
snake_case : List[Any] = -2 * big_a * pmpc
snake_case : str = big_a * (ppmc - aaa)
snake_case : List[str] = pmc + aaa
snake_case : List[str] = 2 * mpc
snake_case : Union[str, Any] = pmc - aaa
snake_case : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
    return filt
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : int = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCamelCase : Tuple = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCamelCase : Tuple = {'facebook/blenderbot-3B': 1_2_8}
class __lowercase ( UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
_snake_case = BlenderbotTokenizer
def __init__( self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> Any:
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
snake_case : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A ) != add_prefix_space:
snake_case : Optional[int] = getattr(A , pre_tok_state.pop("""type""" ) )
snake_case : Dict = add_prefix_space
snake_case : List[str] = pre_tok_class(**A )
snake_case : Any = add_prefix_space
snake_case : List[str] = """post_processor"""
snake_case : Union[str, Any] = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
snake_case : str = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case : Union[str, Any] = tuple(state["""sep"""] )
if "cls" in state:
snake_case : int = tuple(state["""cls"""] )
snake_case : List[Any] = False
if state.get("""add_prefix_space""" , A ) != add_prefix_space:
snake_case : str = add_prefix_space
snake_case : int = True
if state.get("""trim_offsets""" , A ) != trim_offsets:
snake_case : Dict = trim_offsets
snake_case : Union[str, Any] = True
if changes_to_apply:
snake_case : List[str] = getattr(A , state.pop("""type""" ) )
snake_case : Any = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase ( self , A ) -> Any:
snake_case : List[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
snake_case : str = value
def UpperCAmelCase ( self , *A , **A ) -> BatchEncoding:
snake_case : Optional[int] = kwargs.get("""is_split_into_words""" , A )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A , **A )
def UpperCAmelCase ( self , *A , **A ) -> BatchEncoding:
snake_case : List[Any] = kwargs.get("""is_split_into_words""" , A )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A , **A )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
snake_case : Union[str, Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
snake_case : Union[str, Any] = [self.sep_token_id]
snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self , A , A = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A ) -> List[int]:
snake_case : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(A )
snake_case : int = """ """.join(A )
snake_case : Any = self.encode(A )
if len(A ) > self.model_max_length:
snake_case : str = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
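# Hedged usage sketch (added; requires Hub access, and the class name
# BlenderbotTokenizerFast comes from the upstream module rather than this file):
#
#     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     print(tokenizer(" Hello world")["input_ids"])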
| 719 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase : Tuple = getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 8 ,lowercase = 1024 ,lowercase="val" ,lowercase=None ,lowercase=False ,lowercase="summarization" ,lowercase=None ,lowercase=1 ,lowercase = None ,lowercase="" ,**lowercase ,) -> Dict:
snake_case : Union[str, Any] = str(lowercase )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" ,rank=lowercase )
snake_case : Union[str, Any] = Path(lowercase )
snake_case : Tuple = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(lowercase )
snake_case : str = AutoModelForSeqaSeqLM.from_pretrained(lowercase ).cuda()
if fpaa:
snake_case : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowercase ,lowercase ) # update config with task specific params
snake_case : Optional[Any] = generate_kwargs.pop("""num_beams""" ,model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
snake_case : Union[str, Any] = num_return_sequences
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
snake_case : Dict = tokenizer.model_max_length
if prefix is None:
snake_case : str = prefix or getattr(model.config ,"""prefix""" ,"""""" ) or """"""
snake_case : Union[str, Any] = SeqaSeqDataset(
lowercase ,lowercase ,lowercase ,max_target_length=1024 ,type_path=lowercase ,n_obs=lowercase ,prefix=lowercase ,**lowercase ,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
snake_case : Dict = ds.make_sortish_sampler(lowercase ,distributed=lowercase ,add_extra_examples=lowercase ,shuffle=lowercase )
snake_case : Union[str, Any] = DataLoader(lowercase ,sampler=lowercase ,batch_size=lowercase ,collate_fn=ds.collate_fn )
snake_case : Optional[int] = []
for batch in tqdm(lowercase ):
snake_case : int = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) ,attention_mask=batch["""attention_mask"""].to(model.device ) ,num_return_sequences=lowercase ,num_beams=lowercase ,**lowercase ,)
snake_case : List[Any] = tokenizer.batch_decode(lowercase ,skip_special_tokens=lowercase ,clean_up_tokenization_spaces=lowercase )
snake_case : List[str] = batch["""ids"""]
if num_return_sequences > 1:
snake_case : Union[str, Any] = chunks(lowercase ,lowercase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowercase ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(lowercase ,lowercase )
return results, sampler.num_replicas
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
snake_case : Optional[int] = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" ,type=lowercase ,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" ,type=lowercase ,help="""like facebook/bart-large-cnn,t5-base, etc.""" ,default="""sshleifer/distilbart-xsum-12-3""" ,)
parser.add_argument("""--save_dir""" ,type=lowercase ,help="""where to save""" ,default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" ,type=lowercase ,default=lowercase )
parser.add_argument(
"""--type_path""" ,type=lowercase ,default="""test""" ,help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" ,type=lowercase ,default="""summarization""" ,help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" ,type=lowercase ,default=8 ,required=lowercase ,help="""batch size""" )
parser.add_argument(
"""--local_rank""" ,type=lowercase ,default=-1 ,required=lowercase ,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" ,type=lowercase ,default=lowercase ,required=lowercase ,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" ,type=lowercase ,default=1 ,required=lowercase ,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" ,type=lowercase ,default=600 ,required=lowercase ,help="""How long should master process wait for other processes to finish.""" ,)
parser.add_argument("""--src_lang""" ,type=lowercase ,default=lowercase ,required=lowercase )
parser.add_argument("""--tgt_lang""" ,type=lowercase ,default=lowercase ,required=lowercase )
parser.add_argument(
"""--prefix""" ,type=lowercase ,required=lowercase ,default=lowercase ,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" ,action="""store_true""" )
parser.add_argument("""--debug""" ,action="""store_true""" )
snake_case : Optional[Any] = time.time()
    snake_case , snake_case : Optional[Any] = parser.parse_known_args()
snake_case : int = parse_numeric_n_bool_cl_kwargs(lowercase )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
snake_case : Tuple = Path(args.save_dir + """_tmp""" )
Path(lowercase ).mkdir(exist_ok=lowercase ) # this handles locking.
snake_case : Any = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
snake_case : List[Any] = {}
if args.src_lang is not None:
snake_case : Dict = args.src_lang
if args.tgt_lang is not None:
snake_case : Any = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowercase )
snake_case : Tuple = eval_data_dir(
args.data_dir ,lowercase ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=lowercase ,**lowercase ,)
if args.local_rank <= 0:
snake_case : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowercase )
snake_case : int = gather_results_from_each_node(lowercase ,lowercase ,args.sync_timeout )
snake_case : Dict = combine_partial_results(lowercase )
if args.num_return_sequences > 1:
snake_case : Union[str, Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(lowercase ,lowercase )
return
snake_case : Tuple = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(lowercase ) as f:
snake_case : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowercase )]
# Calculate metrics, save metrics, and save _generations.txt
snake_case : List[str] = """translation""" in args.task
snake_case : Tuple = calculate_bleu if calc_bleu else calculate_rouge
snake_case : int = """bleu""" if calc_bleu else """rouge"""
snake_case : Dict = score_fn(lowercase ,lowercase )
snake_case : int = len(lowercase )
snake_case : Union[str, Any] = time.time() - start_time
snake_case : Union[str, Any] = round(runtime / metrics["""n_obs"""] ,4 )
snake_case : List[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
snake_case : List[Any] = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(lowercase ,lowercase ,indent=lowercase )
print(lowercase )
write_txt_file(lowercase ,save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(lowercase ,save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(lowercase )
def combine_partial_results(partial_results) -> List:
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("""waiting for all nodes to finish""")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("""rank_*.json"""))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)  # lmap/load_json come from the local utils module
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("""Rank 0 gave up on waiting for other processes""")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
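The script above has every rank write `rank_<i>.json` and lets rank 0 poll, merge, and re-sort by `id`. A single-process sketch of just that gather-and-merge step (the helper name and the demo directory are ours, not part of the original script):
import json
from pathlib import Path

def merge_rank_files(save_dir: Path) -> list:
    # Flatten every rank_*.json and restore the global example order by "id",
    # mirroring combine_partial_results above.
    records = []
    for rank_file in sorted(save_dir.glob("rank_*.json")):
        records.extend(json.loads(rank_file.read_text()))
    records.sort(key=lambda x: x["id"])
    return [x["pred"] for x in records]

if __name__ == "__main__":
    tmp = Path("tmp_gen_demo")
    tmp.mkdir(exist_ok=True)
    # Two fake ranks writing interleaved example ids, as the eval loop above would.
    (tmp / "rank_0.json").write_text(json.dumps([{"id": 0, "pred": "a"}, {"id": 2, "pred": "c"}]))
    (tmp / "rank_1.json").write_text(json.dumps([{"id": 1, "pred": "b"}]))
    print(merge_rank_files(tmp))  # ['a', 'b', 'c']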
| 720 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Downward pass: bubble the smallest element of the prefix toward index 0.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Upward pass: bubble the largest element of the prefix toward index i.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowerCamelCase : Dict = True
from torch.cuda.amp import autocast
lowerCamelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to log verbose messages or not."""} , )
_snake_case = field(
default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} )
_snake_case = field(
default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} )
_snake_case = field(
default=0.999_995 , metadata={"""help""": """Decay of gumbel temperature during training."""} )
def configure_logger(model_args, training_args) -> None:
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
snake_case : int = logging.WARNING
if model_args.verbose_logging:
snake_case : List[str] = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
snake_case : Any = logging.INFO
logger.setLevel(lowercase )
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
_snake_case = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
_snake_case = field(
default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
_snake_case = field(
default=1 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
_snake_case = field(
default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
_snake_case = field(
default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} )
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = """longest"""
_snake_case = None
_snake_case = None
def __call__( self , A ) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
snake_case : Dict = self.feature_extractor.pad(
A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
snake_case : Tuple = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
snake_case : Any = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
snake_case : Optional[int] = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
snake_case : Optional[Any] = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
snake_case : List[str] = 1
snake_case : str = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
snake_case : Tuple = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=A , min_masks=2 , )
return batch
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , A=1 , A=0 , A=1.0 , **A ) -> int:
super().__init__(*A , **A )
snake_case : Optional[Any] = 0
snake_case : Union[str, Any] = max_gumbel_temp
snake_case : List[str] = min_gumbel_temp
snake_case : List[Any] = gumbel_temp_decay
def UpperCAmelCase ( self , A , A ) -> torch.Tensor:
model.train()
snake_case : Optional[Any] = self._prepare_inputs(A )
if self.use_amp:
with autocast():
snake_case : int = self.compute_loss(A , A )
else:
snake_case : Tuple = self.compute_loss(A , A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
snake_case : Union[str, Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
snake_case : Optional[int] = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
snake_case : Union[str, Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A ).backward()
elif self.use_apex:
with amp.scale_loss(A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case : List[str] = parser.parse_args_into_dataclasses()
configure_logger(lowercase ,lowercase )
# Downloading and loading a dataset from the hub.
snake_case : Tuple = load_dataset(data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
snake_case : List[Any] = DatasetDict()
snake_case : Union[str, Any] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" ,cache_dir=model_args.cache_dir ,)
snake_case : str = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" ,cache_dir=model_args.cache_dir ,)
else:
        # make sure only "validation" and "train" keys remain
snake_case : Optional[int] = DatasetDict()
snake_case : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split="""validation""" ,cache_dir=model_args.cache_dir ,)
snake_case : Optional[int] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f"""{data_args.train_split_name}""" ,cache_dir=model_args.cache_dir ,)
# only normalized-inputs-training is supported
snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,do_normalize=lowercase )
def prepare_dataset(lowercase ):
# check that all files have the correct sampling rate
snake_case : str = librosa.load(batch[data_args.speech_file_column] ,sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
snake_case : List[str] = datasets.map(
lowercase ,num_proc=data_args.preprocessing_num_workers ,remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
snake_case : Union[str, Any] = vectorized_datasets.filter(
lambda lowercase : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(lowercase ):
return feature_extractor(batch["""speech"""] ,sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
snake_case : str = vectorized_datasets.map(
lowercase ,batched=lowercase ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,remove_columns=vectorized_datasets["""train"""].column_names ,)
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
snake_case : Union[str, Any] = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,gradient_checkpointing=training_args.gradient_checkpointing ,)
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
snake_case : Union[str, Any] = WavaVecaForPreTraining(lowercase )
snake_case : List[str] = DataCollatorForWavaVecaPretraining(model=lowercase ,feature_extractor=lowercase )
snake_case : str = WavaVecaPreTrainer(
model=lowercase ,data_collator=lowercase ,args=lowercase ,train_dataset=vectorized_datasets["""train"""] ,eval_dataset=vectorized_datasets["""validation"""] ,tokenizer=lowercase ,max_gumbel_temp=model_args.max_gumbel_temperature ,min_gumbel_temp=model_args.min_gumbel_temperature ,gumbel_temp_decay=model_args.gumbel_temperature_decay ,)
trainer.train()
if __name__ == "__main__":
main()
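For intuition about the annealing in `training_step` above, here is the temperature schedule in isolation, using the defaults from `ModelArguments`; the helper name is ours:
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    # Same rule as WavaVecaPreTrainer.training_step: exponential decay per
    # update step, clamped at the minimum temperature.
    return max(max_temp * decay**step, min_temp)

for step in (0, 100_000, 277_258, 500_000):
    print(step, round(gumbel_temperature(step), 4))
# The floor is reached once 2.0 * 0.999995**step <= 0.5, i.e. after roughly
# ln(4) / -ln(0.999995), about 277k updates.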
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2 the lyrics vocabulary had 80 characters; v3 dropped "+", so len(lyrics_encoder) == 79.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
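The `_run_strip_accents` method above relies on the standard NFD decomposition trick: split each character into its base form plus combining marks, then drop the marks (Unicode category `Mn`). A self-contained sketch of the same idea:
import unicodedata

def strip_accents(text: str) -> str:
    # NFD splits "é" into "e" + U+0301; filtering category "Mn" drops the
    # combining accent while keeping the base letter.
    decomposed = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

print(strip_accents("Beyoncé, Motörhead, Sigur Rós"))  # Beyonce, Motorhead, Sigur Ros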
| 684 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A ) -> Optional[int]:
super().__init__()
# make sure scheduler can always be converted to DDIM
snake_case : Dict = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=A , scheduler=A )
@torch.no_grad()
def __call__( self , A = 1 , A = None , A = 0.0 , A = 5_0 , A = None , A = "pil" , A = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , A ):
snake_case : Optional[int] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
snake_case : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
snake_case : Tuple = randn_tensor(A , generator=A , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case : Any = self.unet(A , A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case : Any = self.scheduler.step(
A , A , A , eta=A , use_clipped_model_output=A , generator=A ).prev_sample
snake_case : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Optional[Any] = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
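A hedged usage sketch for the pipeline above (`google/ddpm-cifar10-32` is a common demo checkpoint and requires a network download; any DDPM-style UNet repo should work):
import torch
from diffusers import DDIMPipeline  # public pipeline matching the class above

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.manual_seed(0)
# eta=0.0 gives deterministic DDIM sampling; eta=1.0 recovers DDPM-like variance.
image = pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")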
| 700 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    # Slide the pattern over every alignment and compare character by character.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
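The scan above is O(len(s) * len(pattern)). As a cross-check, the same (possibly overlapping) positions can be collected with `str.find`, which is usually much faster in CPython; this helper is ours and assumes it runs in the same module as the function above:
def find_all(s: str, pattern: str) -> list:
    # Repeatedly ask str.find for the next occurrence, restarting one past the
    # last hit so overlapping matches are reported just like the naive scan.
    positions, i = [], s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)
    return positions

text, pat = "ABAAABCDBBABCDDEBCABC", "ABC"
assert find_all(text, pat) == naive_pattern_search(text, pat) == [4, 10, 18]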
| 684 | 0 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    # Branch 1: skip sequence[index]; branch 2: take it, recurse, then backtrack.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
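The recursion above branches skip-vs-take for each element. The same power set can be enumerated iteratively with a bitmask, a common alternative (the helper name is ours; the output order differs from the recursive version):
def all_subsequences_bitmask(sequence: list) -> list:
    # Bit i of the mask decides whether sequence[i] is included, so masks
    # 0 .. 2**n - 1 enumerate the whole power set.
    result = []
    for mask in range(1 << len(sequence)):
        result.append([item for i, item in enumerate(sequence) if mask >> i & 1])
    return result

print(all_subsequences_bitmask([3, 1, 2]))  # 8 subsequences, starting with []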
| 701 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # tanh(x) written through the sigmoid identity: 2 / (1 + e^(-2x)) - 1.
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
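The function above is tanh written through the identity tanh(x) = 2/(1 + e^(-2x)) - 1. A quick numerical check against NumPy's built-in:
import numpy as np

x = np.linspace(-5, 5, 11)
ours = (2 / (1 + np.exp(-2 * x))) - 1
assert np.allclose(ours, np.tanh(x))  # matches to floating-point precision
print(ours)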
| 684 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase : str = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma) -> torch.Tensor:
    # Map clean-signal/noise scaling factors back to a timestep in [0, 1].
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t) -> torch.Tensor:
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self , A ) -> Any:
super().__init__()
snake_case : Optional[int] = DiffusionAttnUnetaD(A , n_attn_layers=4 )
snake_case : Tuple = deepcopy(self.diffusion )
snake_case : List[Any] = torch.quasirandom.SobolEngine(1 , scramble=A )
def download(model_name: str) -> str:
    url = MODELS_MAP[model_name]["""url"""]
    os.system(f"""wget {url} ./""" )
    return f"""./{model_name}.ckpt"""
lowerCamelCase : int = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
lowerCamelCase : Optional[Any] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
lowerCamelCase : int = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
lowerCamelCase : Optional[Any] = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
lowerCamelCase : int = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
lowerCamelCase : Dict = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
if name.startswith("""skip""" ):
return name.replace("""skip""" ,RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f"""ResConvBlock error with {name}""" )
return name.replace(name[:6] ,RES_CONV_MAP[name[:6]] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
for key, value in ATTN_MAP.items():
if name.startswith(lowercase ) and not isinstance(lowercase ,lowercase ):
return name.replace(lowercase ,lowercase )
elif name.startswith(lowercase ):
return [name.replace(lowercase ,lowercase ) for v in value]
raise ValueError(f"""Attn error with {name}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=13 ) -> Any:
snake_case : List[Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" ,"""time_proj""" )
snake_case : Optional[Any] = 0
if string.startswith("""net.3.""" ):
depth += 1
snake_case : Union[str, Any] = string[6:]
elif string.startswith("""net.""" ):
snake_case : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
snake_case : List[Any] = string[7:]
if string.startswith("""main.""" ):
snake_case : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
snake_case : List[str] = string[:2]
snake_case : str = string[2:]
else:
snake_case : Optional[Any] = string[0]
snake_case : str = string[1:]
if depth == max_depth:
snake_case : str = MID_NUM_TO_LAYER[layer_num]
snake_case : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowercase ) < 7:
snake_case : Optional[int] = DOWN_NUM_TO_LAYER[layer_num]
snake_case : List[Any] = f"""down_blocks.{depth}"""
elif depth > 0 and int(lowercase ) > 7:
snake_case : Optional[int] = UP_NUM_TO_LAYER[layer_num]
snake_case : Dict = f"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
snake_case : Tuple = DEPTH_0_TO_LAYER[layer_num]
snake_case : Union[str, Any] = f"""up_blocks.{max_depth - 1}""" if int(lowercase ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
snake_case : Any = string_left[1:]
if "resnets" in new_layer:
snake_case : Any = convert_resconv_naming(lowercase )
elif "attentions" in new_layer:
snake_case : int = convert_attn_naming(lowercase )
snake_case : Optional[Any] = new_string_left
if not isinstance(lowercase ,lowercase ):
snake_case : Optional[Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
snake_case : Union[str, Any] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
snake_case : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
snake_case : List[str] = rename(lowercase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowercase ,lowercase ):
snake_case : Any = transform_conv_attns(lowercase ,lowercase ,lowercase )
else:
snake_case : Optional[Any] = v
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if len(lowercase ) == 1:
if len(v.shape ) == 3:
# weight
snake_case : int = v[:, :, 0]
else:
# bias
snake_case : Any = v
else:
# qkv matrices
snake_case : str = v.shape[0]
snake_case : Tuple = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case : Tuple = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
snake_case : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
snake_case : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
snake_case : Union[str, Any] = download(lowercase )
snake_case : Tuple = MODELS_MAP[model_name]["""sample_rate"""]
snake_case : Tuple = MODELS_MAP[model_name]["""sample_size"""]
snake_case : Tuple = Object()
snake_case : int = sample_size
snake_case : List[str] = sample_rate
snake_case : int = 0
snake_case : List[Any] = UNetaDModel(sample_size=lowercase ,sample_rate=lowercase )
snake_case : Optional[Any] = diffusers_model.state_dict()
snake_case : Optional[Any] = DiffusionUncond(lowercase )
orig_model.load_state_dict(torch.load(args.model_path ,map_location=lowercase )["""state_dict"""] )
snake_case : Union[str, Any] = orig_model.diffusion_ema.eval()
snake_case : Optional[Any] = orig_model.state_dict()
snake_case : Union[str, Any] = rename_orig_weights(lowercase )
snake_case : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case : Tuple = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowercase ) == 0, f"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith("""kernel""" ) for k in list(lowercase ) ), f"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
snake_case : Optional[int] = value.squeeze()
snake_case : int = value
diffusers_model.load_state_dict(lowercase )
snake_case : List[Any] = 100
snake_case : List[str] = 33
snake_case : Optional[Any] = IPNDMScheduler(num_train_timesteps=lowercase )
snake_case : Tuple = torch.manual_seed(lowercase )
snake_case : Optional[int] = torch.randn([1, 2, config.sample_size] ,generator=lowercase ).to(lowercase )
snake_case : Union[str, Any] = torch.linspace(1 ,0 ,steps + 1 ,device=lowercase )[:-1]
snake_case : Optional[Any] = get_crash_schedule(lowercase )
snake_case : Tuple = DanceDiffusionPipeline(unet=lowercase ,scheduler=lowercase )
snake_case : Dict = torch.manual_seed(33 )
snake_case : str = pipe(num_inference_steps=lowercase ,generator=lowercase ).audios
snake_case : Tuple = sampling.iplms_sample(lowercase ,lowercase ,lowercase ,{} )
snake_case : Any = generated.clamp(-1 ,1 )
snake_case : List[Any] = (generated - audio).abs().sum()
snake_case : Tuple = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" ,lowercase )
print("""Diff max""" ,lowercase )
assert diff_max < 1E-3, f"""Diff max: {diff_max} is too much :-/"""
print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCamelCase : List[str] = parser.parse_args()
main(args)
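For a sanity check of the schedule math above: `get_crash_schedule` maps a linear t through sigma = sin^2(pi*t/2) and alpha = sqrt(1 - sigma^2), then back to a timestep via atan2, so the endpoints 0 and 1 are fixed points:
import math
import torch

t = torch.linspace(0, 1, 5)
sigma = torch.sin(t * math.pi / 2) ** 2
alpha = (1 - sigma**2) ** 0.5
crash_t = torch.atan2(sigma, alpha) / math.pi * 2
print(crash_t)  # 0.0 at t=0 and 1.0 at t=1, bent in between
assert crash_t[0].item() == 0.0 and abs(crash_t[-1].item() - 1.0) < 1e-6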
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
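The `_LazyModule` above defers the heavy `modeling_*` imports until an attribute is first touched. A minimal standalone sketch of the same pattern (not the actual transformers implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule members on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(self._symbol_to_module[attr])
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups are plain attribute hits
        return value

# Example: the json module is only imported when JSONDecoder is first touched.
lazy = LazyModule("demo", {"json": ["JSONDecoder"]})
print(lazy.JSONDecoder)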
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Any = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
lowerCamelCase : Any = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
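A hedged usage sketch for the tokenizer above (the public class name is `SpeechT5Tokenizer`; running this downloads the checkpoint from the Hub and needs `sentencepiece` installed):
from transformers import SpeechT5Tokenizer

tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
enc = tok("Hello world", return_tensors="pt")
print(enc["input_ids"])              # character-level pieces plus a trailing </s>
print(tok.decode(enc["input_ids"][0]))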
| 684 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk skips scripts/ and hidden/private directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i: int) -> str:
    return f"{i * ' '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
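A quick demo of the fixed script above on a throwaway tree (run in the same module so `print_directory_md` is in scope):
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    os.makedirs(os.path.join(tmp, "sorts"))
    open(os.path.join(tmp, "sorts", "cocktail_shaker_sort.py"), "w").close()
    cwd = os.getcwd()
    os.chdir(tmp)  # the script resolves paths relative to "."
    try:
        print_directory_md(".")
    finally:
        os.chdir(cwd)
# prints a blank line, then:
# ## Sorts
#  * [Cocktail Shaker Sort](sorts/cocktail_shaker_sort.py)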
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
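A hedged sketch instantiating the config above; it assumes `transformers` exposes `GPTNeoXJapaneseConfig`, and the FFN-width reading is drawn from the upstream modeling code rather than this snippet:
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig()  # the defaults shown in the signature above
# intermediate_multiple_size is a multiplier, not a raw width: the model uses
# hidden_size * intermediate_multiple_size features in its MLP
# (here 2560 * 4 = 10240, under the assumption above).
print(config.hidden_size, config.intermediate_multiple_size)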
| 684 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
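With the defaults above (128 mel bins, 1024 max frames, 16x16 patches, stride 10 on both axes), the number of spectrogram patches follows the usual strided-window count, floor((size - patch) / stride) + 1 per axis. A sketch of that arithmetic, mirroring how the AST model derives its sequence length (the helper name is ours):
def ast_num_patches(num_mel_bins=128, max_length=1024, patch_size=16, frequency_stride=10, time_stride=10):
    # Strided-window count per axis: floor((size - patch) / stride) + 1.
    freq_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return freq_out, time_out, freq_out * time_out

print(ast_num_patches())  # (12, 101, 1212) spectrogram patches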
| 706 |
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    if not bin_str:  # the input was "0" (or "-0"); avoid int("") below
        bin_str = "0"
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
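A few example calls for the fixed converter above (run in the same module; note the result is the binary digits expressed as a base-10 int literal, matching the original script's convention):
assert hex_to_bin("AC") == 10101100       # 0xAC == 172 == 0b10101100
assert hex_to_bin("-fb1f") == -1111101100011111
assert hex_to_bin("0") == 0               # handled by the explicit zero guard
assert hex_to_bin("f") == int(bin(0xF)[2:])
print("hex_to_bin examples OK")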
| 684 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = 1_0
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[Any] = [1, 2, 3, 4]
snake_case : str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
snake_case : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
snake_case : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Tuple = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
snake_case : Optional[Any] = process_story(A )
self.assertEqual(A , [] )
def UpperCAmelCase ( self ) -> str:
snake_case : Dict = """"""
snake_case : Tuple = process_story(A )
self.assertEqual(A , [] )
self.assertEqual(A , [] )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
snake_case : List[Any] = process_story(A )
snake_case : str = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(A , A )
snake_case : List[str] = ["""It was the best of times."""]
self.assertEqual(A , A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Dict = torch.tensor([1, 2, 3, 4] )
snake_case : Tuple = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(A , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
snake_case : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 2_3 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self ) -> Any:
snake_case : List[str] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
snake_case : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = 1_0_1
snake_case : int = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
snake_case : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
snake_case : List[Any] = compute_token_type_ids(A , A )
np.testing.assert_array_equal(A , A )
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
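# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the preprocessing chain
# above reduces to resize -> center-crop -> rescale -> normalize. This
# numpy-only illustration mirrors the rescale/normalize math; the 0.5
# statistics match the IMAGENET_STANDARD constants imported above, and the
# array shape is just an example.
import numpy as np

demo_image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
rescaled = demo_image * (1 / 255)  # rescale step: map pixel values into [0, 1]
demo_mean = np.array([0.5, 0.5, 0.5])  # IMAGENET_STANDARD_MEAN
demo_std = np.array([0.5, 0.5, 0.5])  # IMAGENET_STANDARD_STD
normalized = (rescaled - demo_mean) / demo_std  # normalize, broadcast over H x W
pixel_values = normalized.transpose(2, 0, 1)  # ChannelDimension.FIRST: (C, H, W)
assert pixel_values.shape == (3, 224, 224)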
| 684 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self , A ) -> np.ndarray:
if self.framework == "tf":
snake_case : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def UpperCAmelCase ( self , A ) -> np.ndarray:
snake_case : List[Any] = self.get_masked_index(A )
snake_case : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def UpperCAmelCase ( self , A ) -> Dict:
if isinstance(A , A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A )
def UpperCAmelCase ( self , A , A=None , **A ) -> Dict[str, GenericTensor]:
if return_tensors is None:
snake_case : Optional[int] = self.framework
snake_case : Dict = self.tokenizer(A , return_tensors=A )
self.ensure_exactly_one_mask_token(A )
return model_inputs
def UpperCAmelCase ( self , A ) -> int:
snake_case : Dict = self.model(**A )
snake_case : Tuple = model_inputs["""input_ids"""]
return model_outputs
def UpperCAmelCase ( self , A , A=5 , A=None ) -> List[Any]:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case : Any = target_ids.shape[0]
snake_case : Dict = model_outputs["""input_ids"""][0]
snake_case : Optional[int] = model_outputs["""logits"""]
if self.framework == "tf":
snake_case : int = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case : Dict = outputs.numpy()
snake_case : Optional[Any] = outputs[0, masked_index, :]
snake_case : Optional[Any] = stable_softmax(A , axis=-1 )
if target_ids is not None:
snake_case : str = tf.gather_nd(tf.squeeze(A , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case : Any = tf.expand_dims(A , 0 )
snake_case : Tuple = tf.math.top_k(A , k=A )
            snake_case , snake_case : int = topk.values.numpy(), topk.indices.numpy()
else:
snake_case : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case : int = outputs[0, masked_index, :]
snake_case : Dict = logits.softmax(dim=-1 )
if target_ids is not None:
snake_case : Optional[Any] = probs[..., target_ids]
            snake_case , snake_case : Optional[Any] = probs.topk(A )
snake_case : List[Any] = []
snake_case : Optional[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case : str = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case : List[str] = input_ids.numpy().copy()
if target_ids is not None:
snake_case : Any = target_ids[p].tolist()
snake_case : List[Any] = p
# Filter padding out:
snake_case : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case : Tuple = self.tokenizer.decode(A , skip_special_tokens=A )
snake_case : List[str] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(A )
result.append(A )
if single_mask:
return result[0]
return result
def UpperCAmelCase ( self , A , A=None ) -> List[str]:
if isinstance(A , A ):
snake_case : Optional[Any] = [targets]
try:
snake_case : Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
snake_case : Any = {}
snake_case : str = []
for target in targets:
snake_case : Union[str, Any] = vocab.get(A , A )
if id_ is None:
snake_case : int = self.tokenizer(
A , add_special_tokens=A , return_attention_mask=A , return_token_type_ids=A , max_length=1 , truncation=A , )["""input_ids"""]
if len(A ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
snake_case : List[str] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
snake_case : Union[str, Any] = list(set(A ) )
if len(A ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
snake_case : str = np.array(A )
return target_ids
def UpperCAmelCase ( self , A=None , A=None ) -> str:
snake_case : Tuple = {}
if targets is not None:
snake_case : List[Any] = self.get_target_ids(A , A )
snake_case : Optional[Any] = target_ids
if top_k is not None:
snake_case : List[str] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self , A , *A , **A ) -> Tuple:
snake_case : int = super().__call__(A , **A )
if isinstance(A , A ) and len(A ) == 1:
return outputs[0]
return outputs
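# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how this pipeline is
# typically reached through the high-level factory. The checkpoint name is
# illustrative and is downloaded on first use.
if __name__ == "__main__":
    from transformers import pipeline

    fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
    for pred in fill_mask("Paris is the [MASK] of France.", top_k=3):
        print(pred["token_str"], round(pred["score"], 3))
    # Passing targets=["capital"] would restrict scoring, via get_target_ids above.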
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
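# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original test): outside a test suite, the same
# installed-version bookkeeping can be done with importlib.metadata; the
# distribution name below is only an example.
if __name__ == "__main__":
    import importlib.metadata

    try:
        print("numpy version:", importlib.metadata.version("numpy"))
    except importlib.metadata.PackageNotFoundError:
        print("numpy is not installed")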
| 684 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCamelCase : Tuple = TypeVar('T')
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return (position - 1) // 2
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return (2 * position) + 1
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return (2 * position) + 2
class __lowercase (Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
snake_case : list[tuple[T, int]] = []
snake_case : dict[T, int] = {}
snake_case : int = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def UpperCAmelCase ( self , A , A ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
snake_case : Optional[Any] = self.elements
self.elements += 1
self._bubble_up(A )
def UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
snake_case : Dict = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
snake_case : Optional[int] = self.heap[0]
self._bubble_down(A )
return elem
def UpperCAmelCase ( self , A , A ) -> None:
# Update the weight of the given key
snake_case : List[str] = self.position_map[elem]
snake_case : Optional[Any] = (elem, weight)
if position > 0:
snake_case : Union[str, Any] = get_parent_position(A )
snake_case : Optional[Any] = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(A )
else:
self._bubble_down(A )
else:
self._bubble_down(A )
def UpperCAmelCase ( self , A ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
snake_case : Any = self.position_map[elem]
if curr_pos == 0:
return None
snake_case : Union[str, Any] = get_parent_position(A )
snake_case : str = self.heap[curr_pos]
snake_case : Optional[Any] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(A , A )
return self._bubble_up(A )
return None
def UpperCAmelCase ( self , A ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
snake_case : List[Any] = self.position_map[elem]
snake_case : Union[str, Any] = self.heap[curr_pos]
snake_case : Dict = get_child_left_position(A )
snake_case : Any = get_child_right_position(A )
if child_left_position < self.elements and child_right_position < self.elements:
snake_case : List[str] = self.heap[child_left_position]
snake_case : Optional[int] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(A , A )
return self._bubble_down(A )
if child_left_position < self.elements:
snake_case : str = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(A , A )
return self._bubble_down(A )
else:
return None
if child_right_position < self.elements:
snake_case : Any = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(A , A )
return self._bubble_down(A )
return None
def UpperCAmelCase ( self , A , A ) -> None:
# Swap the nodes at the given positions
snake_case : Optional[int] = self.heap[nodea_pos][0]
snake_case : Any = self.heap[nodea_pos][0]
snake_case : Optional[Any] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
snake_case : str = nodea_pos
snake_case : List[str] = nodea_pos
class __lowercase (Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
snake_case : dict[T, dict[T, int]] = {}
snake_case : int = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def UpperCAmelCase ( self , A ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
snake_case : List[Any] = {}
self.nodes += 1
def UpperCAmelCase ( self , A , A , A ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(A )
self.add_node(A )
snake_case : Any = weight
snake_case : str = weight
def SCREAMING_SNAKE_CASE__ ( lowercase ,) -> tuple[dict[T, int], dict[T, T | None]]:
snake_case : dict[T, int] = {node: maxsize for node in graph.connections}
snake_case : dict[T, T | None] = {node: None for node in graph.connections}
snake_case : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowercase ,lowercase )
if priority_queue.is_empty():
return dist, parent
# initialization
snake_case : Dict = priority_queue.extract_min()
snake_case : Dict = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
snake_case : List[str] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowercase ,dist[neighbour] )
snake_case : str = node
# running prim's algorithm
while not priority_queue.is_empty():
snake_case : Union[str, Any] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
snake_case : Dict = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowercase ,dist[neighbour] )
snake_case : Dict = node
return dist, parent
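# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the same minimum-spanning-tree
# idea using the standard-library heap instead of the indexed priority queue
# above. Without decrease-key, stale heap entries are simply skipped.
import heapq

def prim_mst_weight(adj: dict, start) -> int:
    """`adj` maps node -> {neighbour: weight}; returns the total MST weight."""
    visited = {start}
    heap = [(w, v) for v, w in adj[start].items()]
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(adj):
        w, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale entry for an already-connected node
        visited.add(v)
        total += w
        for u, w_u in adj[v].items():
            if u not in visited:
                heapq.heappush(heap, (w_u, u))
    return total

assert prim_mst_weight({"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}, "a") == 3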
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
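# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original tests): `find_labels` works by
# inspecting the forward signature for label-like parameter names. A minimal
# standalone version of that idea:
import inspect

def find_label_params(fn) -> list:
    params = inspect.signature(fn).parameters
    return [name for name in params if "label" in name or name in ("start_positions", "end_positions")]

def fake_forward(input_ids=None, attention_mask=None, labels=None):
    pass

assert find_label_params(fake_forward) == ["labels"]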
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Optional[int] = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
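# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): `_LazyModule` defers the heavy
# imports above until first attribute access. A tiny standalone illustration of
# the same lazy-import idea:
import importlib

class LazyAttr:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)  # imported on first use
        return getattr(self._module, name)

lazy_json = LazyAttr("json")
assert lazy_json.dumps({"a": 1}) == '{"a": 1}'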
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
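# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): with the defaults above, an
# AST-style model cuts the spectrogram into patch_size x patch_size patches at
# the given strides. The grid arithmetic below follows the usual convolution
# formula; the resulting counts illustrate the config values and are an
# assumption about the modeling code, not taken from it.
patch_size, frequency_stride, time_stride = 16, 10, 10
num_mel_bins, max_length = 128, 1024
freq_patches = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
time_patches = (max_length - patch_size) // time_stride + 1  # 101
print(freq_patches * time_patches)  # 1212 patches per spectrogram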
| 684 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
snake_case : Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
snake_case : Optional[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : str = AutoConfig.from_pretrained("""gpt2""" )
snake_case : Optional[int] = GenerationConfig.from_model_config(A )
snake_case : Dict = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase ( self ) -> Dict:
snake_case : List[Any] = GenerationConfig()
snake_case : Union[str, Any] = {
"""max_new_tokens""": 1_0_2_4,
"""foo""": """bar""",
}
snake_case : int = copy.deepcopy(A )
snake_case : Optional[Any] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Optional[Any] = GenerationConfig()
snake_case : Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
snake_case : str = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
snake_case : int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Tuple = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
snake_case : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
snake_case : Any = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> str:
snake_case : int = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase ( cls ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Dict:
snake_case : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
snake_case : str = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
snake_case : Optional[int] = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
snake_case : Union[str, Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
snake_case : Union[str, Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
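# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original tests): the save/load
# round-trip exercised above, reduced to its minimal form. Parameter values are
# illustrative.
if __name__ == "__main__":
    config = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as demo_dir:
        config.save_pretrained(demo_dir)
        loaded = GenerationConfig.from_pretrained(demo_dir)
    assert loaded.temperature == 0.7 and loaded.do_sample is True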
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
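# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how the task-specific
# subclasses above are usually reached through the high-level factory. The
# checkpoint name is illustrative and is downloaded on first use.
if __name__ == "__main__":
    from transformers import pipeline

    translator = pipeline("translation_en_to_de", model="t5-small")
    print(translator("The house is wonderful.", max_length=40)[0]["translation_text"])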
| 684 | 0 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=1024 ) -> Dict:
    snake_case , snake_case : Union[str, Any] = [], []
snake_case : List[str] = list(zip(lowercase ,lowercase ) )
    snake_case , snake_case : str = sorted_examples[0]
def is_too_big(lowercase ):
return tok(lowercase ,return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
snake_case : str = new_src + """ """ + src
snake_case : Optional[int] = new_tgt + """ """ + tgt
if is_too_big(lowercase ) or is_too_big(lowercase ): # cant fit, finalize example
finished_src.append(lowercase )
finished_tgt.append(lowercase )
            snake_case , snake_case : Optional[Any] = src, tgt
else: # can fit, keep adding
            snake_case , snake_case : str = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(lowercase )
finished_tgt.append(lowercase )
return finished_src, finished_tgt
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> int:
snake_case : str = Path(lowercase )
save_path.mkdir(exist_ok=lowercase )
for split in ["train"]:
        snake_case , snake_case : Union[str, Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
snake_case : Optional[Any] = [x.rstrip() for x in Path(lowercase ).open().readlines()]
snake_case : Union[str, Any] = [x.rstrip() for x in Path(lowercase ).open().readlines()]
snake_case : Dict = pack_examples(lowercase ,lowercase ,lowercase ,lowercase )
print(f"""packed {split} split from {len(lowercase )} examples -> {len(lowercase )}.""" )
Path(save_path / f"""{split}.source""" ).open("""w""" ).write("""\n""".join(lowercase ) )
Path(save_path / f"""{split}.target""" ).open("""w""" ).write("""\n""".join(lowercase ) )
for split in ["val", "test"]:
        snake_case , snake_case : List[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(lowercase ,save_path / f"""{split}.source""" )
shutil.copyfile(lowercase ,save_path / f"""{split}.target""" )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" ,type=lowercase ,help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" ,type=lowercase ,default=128 )
parser.add_argument("""--data_dir""" ,type=lowercase )
parser.add_argument("""--save_path""" ,type=lowercase )
snake_case : Union[str, Any] = parser.parse_args()
snake_case : Any = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(lowercase ,Path(args.data_dir ) ,args.max_seq_len ,args.save_path )
if __name__ == "__main__":
packer_cli()
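# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the greedy packing loop
# above, demonstrated with a whitespace "tokenizer" so it runs with no model.
def demo_pack(pairs, max_tokens=6):
    packed = []
    cur_src, cur_tgt = pairs[0]
    for src, tgt in pairs[1:]:
        cand_src, cand_tgt = cur_src + " " + src, cur_tgt + " " + tgt
        if max(len(cand_src.split()), len(cand_tgt.split())) > max_tokens:
            packed.append((cur_src, cur_tgt))  # cannot fit: finalize the example
            cur_src, cur_tgt = src, tgt
        else:
            cur_src, cur_tgt = cand_src, cand_tgt  # fits: keep accumulating
    packed.append((cur_src, cur_tgt))
    return packed

assert demo_pack([("a b", "x"), ("c d", "y"), ("e f g h", "z")]) == [("a b c d", "x y"), ("e f g h", "z")]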
| 712 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 0 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCamelCase : List[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
lowerCamelCase : Tuple = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowerCamelCase : Any = 'zero2'
lowerCamelCase : Dict = 'zero3'
lowerCamelCase : List[Any] = [ZEROa, ZEROa]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Optional[int]:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    snake_case : Optional[Any] = parameterized.to_safe_name("""_""".join(str(x ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
lowerCamelCase : Optional[int] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@parameterized.expand(A , name_func=A )
def UpperCAmelCase ( self , A , A ) -> Dict:
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
@require_torch_multi_gpu
@parameterized.expand(A , name_func=A )
def UpperCAmelCase ( self , A , A ) -> List[Any]:
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
@parameterized.expand(A , name_func=A )
def UpperCAmelCase ( self , A , A ) -> str:
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
@require_torch_multi_gpu
@parameterized.expand(A , name_func=A )
def UpperCAmelCase ( self , A , A ) -> Optional[Any]:
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
def UpperCAmelCase ( self , A ) -> Tuple:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def UpperCAmelCase ( self , A , A , A = 1_0 , A = True , A = True , A = True , ) -> Tuple:
snake_case : List[str] = models[model]
snake_case : List[Any] = self.run_trainer(
stage=A , model_name=A , eval_steps=A , num_train_epochs=1 , distributed=A , fpaa=A , )
self.do_checks(A )
return output_dir
def UpperCAmelCase ( self , A , A , A = 1_0 , A = 1 , A = True , A = True , ) -> Dict:
snake_case : List[Any] = self.get_auto_remove_tmp_dir("""./xxx""" , after=A )
snake_case : List[Any] = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(A )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
snake_case : Union[str, Any] = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
snake_case : List[Any] = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
snake_case : str = self.get_launcher(A )
snake_case : List[str] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A , env=self.get_env() )
return output_dir
def UpperCAmelCase ( self , A=False ) -> Optional[Any]:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
snake_case : str = min(2 , get_gpu_count() ) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
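# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the torch.save step above
# persists a plain state_dict, which round-trips like this. The tensor and file
# name are illustrative.
import os
import tempfile

demo_state = {"weight": torch.zeros(2, 2)}
with tempfile.TemporaryDirectory() as tmp:
    demo_path = os.path.join(tmp, "pytorch_model.bin")
    torch.save(demo_state, demo_path)
    restored = torch.load(demo_path)
assert torch.equal(restored["weight"], demo_state["weight"])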
| 684 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase : Dict = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=False ) -> Tuple:
snake_case : Dict = create_model(
"""HTSAT-tiny""" ,"""roberta""" ,lowercase ,precision="""fp32""" ,device="""cuda:0""" if torch.cuda.is_available() else """cpu""" ,enable_fusion=lowercase ,fusion_type="""aff_2d""" if enable_fusion else None ,)
return model, model_cfg
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
snake_case : str = {}
snake_case : List[Any] = R""".*sequential.(\d+).*"""
snake_case : List[Any] = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case : Dict = key.replace(lowercase ,lowercase )
if re.match(lowercase ,lowercase ):
# replace sequential layers with list
snake_case : Any = re.match(lowercase ,lowercase ).group(1 )
snake_case : Any = key.replace(f"""sequential.{sequential_layer}.""" ,f"""layers.{int(lowercase )//3}.linear.""" )
elif re.match(lowercase ,lowercase ):
snake_case : Union[str, Any] = int(re.match(lowercase ,lowercase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
snake_case : Any = 1 if projecton_layer == 0 else 2
snake_case : Tuple = key.replace(f"""_projection.{projecton_layer}.""" ,f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
snake_case : Tuple = value
snake_case : List[str] = mixed_qkv.size(0 ) // 3
snake_case : Any = mixed_qkv[:qkv_dim]
snake_case : Any = mixed_qkv[qkv_dim : qkv_dim * 2]
snake_case : Tuple = mixed_qkv[qkv_dim * 2 :]
snake_case : Dict = query_layer
snake_case : Optional[Any] = key_layer
snake_case : str = value_layer
else:
snake_case : Optional[int] = value
return model_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=False ) -> List[Any]:
snake_case : Optional[int] = init_clap(lowercase ,enable_fusion=lowercase )
clap_model.eval()
snake_case : str = clap_model.state_dict()
snake_case : int = rename_state_dict(lowercase )
snake_case : Any = ClapConfig()
snake_case : Any = enable_fusion
snake_case : str = ClapModel(lowercase )
# ignore the spectrogram embedding layer
model.load_state_dict(lowercase ,strict=lowercase )
model.save_pretrained(lowercase )
transformers_config.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
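# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the fused-qkv split done for
# the audio branch above, shown on a small matrix. The sizes are illustrative.
demo_qkv = torch.arange(6 * 4).reshape(6, 4)  # fused (3 * head_dim) x in_dim weight
demo_dim = demo_qkv.size(0) // 3
demo_q = demo_qkv[:demo_dim]
demo_k = demo_qkv[demo_dim : 2 * demo_dim]
demo_v = demo_qkv[2 * demo_dim :]
assert demo_q.shape == demo_k.shape == demo_v.shape == (2, 4)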
| 714 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
return "".join(sorted(lowercase ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list[str]:
return word_by_signature[signature(lowercase )]
lowerCamelCase : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
lowerCamelCase : int = sorted({word.strip().lower() for word in data.splitlines()})
lowerCamelCase : List[str] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowerCamelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
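# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the signature-bucketing idea
# on an inline word list instead of words.txt.
demo_words = ["listen", "silent", "enlist", "google", "stone"]
demo_buckets = collections.defaultdict(list)
for w in demo_words:
    demo_buckets["".join(sorted(w))].append(w)  # identical sorted letters -> same bucket
assert ["listen", "silent", "enlist"] in [g for g in demo_buckets.values() if len(g) > 1]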
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def primitive_root( p ) -> int:
    print("""Generating primitive root of p""" )
    while True:
        g = random.randrange(3 ,p )
        if pow(g ,2 ,p ) == 1:
            continue
        if pow(g ,p ,p ) == 1:
            continue
        return g
def generate_key( key_size ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("""Generating prime p...""" )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 ,p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 ,d ,p ) ,p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name ,key_size ) -> None:
    if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
        print("""\nWARNING:""" )
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            """Use a different name or delete these files and re-run this program.""" )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(f"""Writing private key to file {name}_privkey.txt...""" )
    with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""" )
def main() -> None:
    print("""Making key files...""" )
    make_key_files("""elgamal""" ,2048 )
    print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 0 |
'''simple docstring'''
from math import sqrt
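# Project Euler 21: d(n) is the sum of proper divisors of n; a and b form an amicable pair
# when d(a) == b, d(b) == a and a != b (the classic example is 220 and 284). solution()
# sums every such number below the limit.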
def sum_of_divisors( n ) -> int:
    total = 0
    for i in range(1 ,int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit = 10000 ) -> int:
    total = sum(
        i
        for i in range(1 ,limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 716 |
def _modexpt( base ,exponent ,modulo_value ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base ,exponent // 2 ,modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base ,exponent - 1 ,modulo_value )) % modulo_value
def solution( base = 1777 ,height = 1855 ,digits = 8 ) -> int:
    result = base
    for _ in range(1 ,height ):
        result = _modexpt(base ,result ,10**digits )
    return result
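# Illustration: solution(3, 2, 8) builds one tower level above the base, i.e. 3**3 = 27;
# the default call returns the last 8 digits of the hyperexponentiation 1777↑↑1855 by
# iterating modular exponentiation mod 10**digits.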
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers( remaining_length ,remainder ,digits ,length ) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 ,-1 ,-1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 ,(remainder + 2 * digit) // 10 ,digits ,length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 ,(remainder + digit1 + digit2) // 10 ,digits ,length ,)
    return result
def solution( max_power = 9 ) -> int:
    result = 0
    for length in range(1 ,max_power + 1 ):
        result += reversible_numbers(length ,0 ,[0] * length ,length )
    return result
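# A number n is "reversible" when n + reverse(n) consists entirely of odd digits
# (e.g. 36 + 63 = 99); solution() counts the reversible numbers with at most
# max_power digits, filling the digit array from the outside in.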
if __name__ == "__main__":
print(f"""{solution() = }""")
| 717 |
from itertools import product
def total_frequency_distribution( sides_number ,dice_number ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number ,max_face_number + 1 )
    for dice_numbers in product(faces_numbers ,repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 ,dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 ,dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total ,max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability ,ndigits=7 )
    return rounded_peter_win_probability
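# Peter rolls nine 4-sided dice, Colin six 6-sided dice; Peter wins on a strictly higher
# total, so each Peter total is weighted by the number of Colin games with a smaller total.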
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
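# Utilities for moving weights between PyTorch state dicts and Flax parameter trees:
# keys are renamed (weight -> kernel/scale/embedding), linear kernels are transposed and
# conv kernels permuted because the two frameworks store them in different layouts.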
def load_pytorch_checkpoint_in_flax_state_dict( flax_model ,pytorch_checkpoint_path ,is_sharded ,allow_missing_keys=False ):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(f"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path ,map_location="""cpu""" )
        logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict ,flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path ,flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key ,pt_tensor ,random_flax_state_dict ,model_prefix ,) -> (Tuple[str], np.ndarray):
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 ,3 ,1 ,0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + """_v"""
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
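# Summary of the rules above: layer norms map weight -> scale, batch norms map running
# statistics to mean/var, embeddings and (transposed) linear/conv weights map to kernel,
# and weight-norm parametrizations are folded back into `<name>_g` / `<name>_v` keys.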
def convert_pytorch_state_dict_to_flax( pt_state_dict ,flax_model ):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["""params"""]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["""batch_stats"""] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split(""".""" ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key ,pt_tensor ,random_flax_state_dict ,model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("""batch_stats""",) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key ,None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("""params""",) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames ,flax_model ):
import torch
# Load the index
snake_case : Any = {}
for shard_file in shard_filenames:
# load using msgpack utils
snake_case : Tuple = torch.load(lowercase )
snake_case : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case : List[Any] = flax_model.params["""params"""]
snake_case : Optional[int] = flatten_dict(lowercase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
snake_case : Optional[int] = flax_model.params
snake_case : Optional[int] = flatten_dict(lowercase )
snake_case : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
snake_case : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
snake_case : Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case : Dict = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case : Union[str, Any] = rename_key_and_reshape_tensor(
lowercase ,lowercase ,lowercase ,lowercase )
# add model prefix if necessary
snake_case : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
snake_case : Tuple = jnp.asarray(lowercase )
continue
if "var" in flax_key[-1]:
snake_case : List[Any] = jnp.asarray(lowercase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowercase ,lowercase )
continue
# also add unexpected weight so that warning is thrown
snake_case : List[str] = jnp.asarray(lowercase )
else:
# also add unexpected weight so that warning is thrown
snake_case : Optional[Any] = jnp.asarray(lowercase )
return unflatten_dict(lowercase )
def load_flax_checkpoint_in_pytorch_model( model ,flax_checkpoint_path ):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
    # import correct flax class
    flax_cls = getattr(transformers ,"""Flax""" + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path ,"""rb""" ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls ,state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model ,flax_state_dict )
def load_flax_weights_in_pytorch_model( pt_model ,flax_state ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 ,flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params ,flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = jnp.transpose(flax_tensor ,(3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_mean""",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_var""",)
        if "batch_stats" in flax_state:
            flax_key = """.""".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            flax_key = """.""".join(flax_key_tuple )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(""".""" )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + """_g"""
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + """_v"""
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = """.""".join(key_components )
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor ,np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    else:
        logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(missing_keys ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            """ use it for predictions and inference.""" )
    else:
        logger.warning(
            f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
            """If your task is similar to the task the model of the checkpoint was trained on, """
            f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
    return pt_model
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
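# If transformers >= 4.25.0 and torch are importable, expose the real Versatile Diffusion
# pipelines; otherwise fall back to dummy objects that raise a helpful error when used.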
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
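# Randomized in-place quicksort instrumented to count comparisons; the driver below sorts
# 100 samples drawn from a standard normal distribution and prints the comparison count.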
def _in_place_quick_sort( a ,start ,end ):
    count = 0
    if start < end:
        pivot = randint(start ,end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a ,start ,end )
        count += _in_place_quick_sort(a ,start ,p - 1 )
        count += _in_place_quick_sort(a ,p + 1 ,end )
    return count
def _in_place_partition( a ,start ,end ):
    count = 0
    pivot = randint(start ,end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start ,end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 1_0_0  # 100 elements are to be sorted
mu , sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
| 719 |
import os
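# Project Euler 11: find the greatest product of four adjacent numbers (right, down, or on
# either diagonal) in the 20x20 grid stored in grid.txt next to this file.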
def solution() -> int:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 ,20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 684 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
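# Checks that FeaturesManager.determine_framework honours an explicit framework argument,
# infers the framework from local checkpoints, and otherwise falls back on whichever of
# PyTorch or TensorFlow is installed.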
@require_torch
@require_tf
class __lowercase (TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt( self , save_dir ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )
    def _setup_tf_ckpt( self , save_dir ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
    def test_framework_provided( self ):
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                FeaturesManager.determine_framework(self.test_model )
| 720 |
def cocktail_shaker_sort( unsorted ) -> list:
    for i in range(len(unsorted ) - 1 ,0 ,-1 ):
        swapped = False
        for j in range(i ,0 ,-1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j] , unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j] , unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
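# Cocktail shaker sort is a bidirectional bubble sort: each pass bubbles the largest value
# right and the smallest value left, and the early exit fires once a pass makes no swap.
# Worst case O(n^2) comparisons, O(n) on an already sorted list.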
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __lowercase (Dataset ):
    """simple docstring"""
    def __init__( self , path="" , prefix="train" ) -> None:
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )
    def __len__( self ) -> int:
        return len(self.documents )
    def __getitem__( self , idx ) -> tuple:
        document_path = self.documents[idx]
        document_name = document_path.split("""/""" )[-1]
        with open(document_path , encoding="""utf-8""" ) as source:
            raw_story = source.read()
            story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story( raw_story ) -> tuple:
    nonempty_lines = list(filter(lambda x : len(x ) != 0 ,[line.strip() for line in raw_story.split("""\n""" )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith("""@highlight""" ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith("""@highlight""" ) ,lines ) )
    return story_lines, summary_lines
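# CNN/DailyMail story files hold the article first, then summary sentences each introduced
# by an "@highlight" marker, which is why the deque is split at the first such line.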
def _add_missing_period( line ) -> str:
    END_TOKENS = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""]
    if line.startswith("""@highlight""" ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size( sequence ,block_size ,pad_token_id ) -> list:
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask( sequence ,pad_token_id ):
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization( story_lines ,summary_lines ,tokenizer ) -> tuple:
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids( batch ,separator_token_id ):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
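# Usage sketch (hypothetical ids, with 101 as the separator token):
#   compute_token_type_ids([[101, 5, 6, 101, 7]], separator_token_id=101)
#   -> tensor([[0, 0, 0, 1, 1]]), i.e. token type ids alternate 0/1 per sentence.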
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
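# Tokenizer for OpenAI's Jukebox music model: it turns an (artist, genres, lyrics) triple
# into three id lists, one per prior, using separate artist/genre/lyrics vocabularies.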
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'artists_file': 'artists.json',
    'lyrics_file': 'lyrics.json',
    'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'artists_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
    },
    'genres_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
    },
    'lyrics_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
    },
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    'jukebox': 5_1_2,
}
class __lowercase (PreTrainedTokenizer ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=5_1_2 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ) -> None:
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding="""utf-8""" ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding="""utf-8""" ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding="""utf-8""" ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 7_9:
            oov = oov.replace(r"""\-'""" , r"""\-+'""" )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size( self ) -> int:
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def get_vocab( self ) -> dict:
        return {
            """artists_encoder""": self.artists_encoder,
            """genres_encoder""": self.genres_encoder,
            """lyrics_encoder""": self.lyrics_encoder,
        }
    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ) -> tuple:
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self , lyrics ) -> List[str]:
        return list(lyrics )
    def tokenize( self , artist , genre , lyrics , **kwargs ) -> tuple:
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists , genres , lyrics , is_split_into_words = False ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + """.v2"""
                genres[idx] = [
                    self._normalize(genre ) + """.v2""" for genre in genres[idx].split("""_""" )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
            vocab = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab["""<unk>"""] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = """"""
        else:
            self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace("""\\""" , """\n""" )
        lyrics = self.out_of_vocab.sub("""""" , lyrics ), [], []
        return artists, genres, lyrics
    def _run_strip_accents( self , text ) -> str:
        text = unicodedata.normalize("""NFD""" , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self , text ) -> str:
        accepted = (
            [chr(i ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
            + ["""."""]
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r"""_+""" )
        text = """""".join([c if c in accepted else """_""" for c in text.lower()] )
        text = pattern.sub("""_""" , text ).strip("""_""" )
        return text
    def convert_lyric_tokens_to_string( self , lyrics ) -> str:
        return " ".join(lyrics )
    def convert_to_tensors( self , inputs , tensor_type = None , prepend_batch_axis = False ):
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
        return inputs
    def __call__( self , artist , genres , lyrics="" , return_tensors="pt" ) -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
        with open(artists_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
        with open(genres_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
        with open(lyrics_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token( self , artists_index , genres_index , lyric_index ) -> tuple:
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
| 684 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00  # TEMPERATURE (unit = K)
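# Built-in potential of a p-n junction: V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2).
# Dividing by the electron-volt constant converts the Boltzmann term from joules to volts.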
def __UpperCAmelCase ( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions( ) -> list:
    """simple docstring"""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url ).read() )['releases'].keys()
    return sorted(releases , key=lambda r : version.Version(r ) )
def init_hf_modules( ) -> None:
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name: Union[str, os.PathLike] ) -> None:
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports( module_file: Union[str, os.PathLike] ) -> list:
    """simple docstring"""
    with open(module_file , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall('^\s*import\s+\.(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files( module_file: Union[str, os.PathLike] ) -> list:
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'{f}.py' for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports( filename: Union[str, os.PathLike] ) -> list:
    """simple docstring"""
    with open(filename , 'r' , encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall('^\s*import\s+(\S+)\s*$' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall('^\s*from\s+(\S+)\s+import' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            F'{", ".join(missing_packages )}. Run `pip install {" ".join(missing_packages )}`' )
    return get_relative_imports(filename )
def get_class_in_module( class_name , module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep , '.' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class( loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('.' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    F' {loaded_module}.' )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path: Union[str, os.PathLike] , module_file: str , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , ) -> str:
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(F'Defaulting to latest_version: {revision}.' )
        elif revision in available_versions:
            revision = F'v{revision}'
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                F' {", ".join(available_versions + ["main"] )}.' )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = F'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , F'{module_needed}.py' , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def get_class_from_dynamic_module( pretrained_model_name_or_path: Union[str, os.PathLike] , module_file: str , class_name: Optional[str] = None , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , **kwargs , ):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('.py' , '' ) )
| 685 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[Any] = ["image_processor", "tokenizer"]
__a : List[Any] = "ChineseCLIPImageProcessor"
__a : Tuple = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' ,FutureWarning ,)
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor
    def __call__( self ,text=None ,images=None ,return_tensors=None ,**kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
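    # Usage sketch (example checkpoint name -- substitute your own):
    #   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    #   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")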
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,FutureWarning ,)
        return self.image_processor_class
| 685 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = 'visual_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
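# Added illustration: configs are plain containers, so overriding a field only
# changes the architecture description; any model built from it starts from
# random weights. A minimal sketch:
if __name__ == "__main__":
    config = VisualBertConfig(visual_embedding_dim=1024, num_hidden_layers=6)
    print(config.visual_embedding_dim, config.num_hidden_layers)  # 1024 6
    # config.to_json_string() round-trips through PretrainedConfig serialization.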
| 685 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase__ : Optional[int] = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = 'masked_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method='topK',
        mask_init='constant',
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
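# Added illustration: the three extra fields (pruning_method, mask_init,
# mask_scale) drive movement pruning in the research example this config comes
# from; the values below are only a sketch, not recommended settings.
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method='topK', mask_init='constant', mask_scale=0.0)
    print(config.pruning_method, config.mask_init, config.mask_scale)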
| 685 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling points uniformly in the unit square."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}')
    print(F'The value of math.pi is {pi}')
    print(F'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {expected_value}')
    print(F'Total error is {abs(estimated_value - expected_value)}')
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x*x) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {pi}')
    print(F'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
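# Added demonstration: with n uniform samples the error of a Monte Carlo
# estimate shrinks roughly like 1/sqrt(n), so at n = 100_000 expect about two
# correct digits of pi. Output varies run to run because sampling is random.
if __name__ == "__main__":
    pi_estimator(100_000)
    pi_estimator_using_area_under_curve(100_000)
    area_under_line_estimator_check(100_000, 0.0, 2.0)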
| 685 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
UpperCamelCase__ : str = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
def __init__( self ,snake_case__=True ,snake_case__=None ,snake_case__=3 ,snake_case__=300 ,snake_case__=1024 ,snake_case__=6 ,snake_case__=1024 ,snake_case__=8 ,snake_case__=6 ,snake_case__=1024 ,snake_case__=8 ,snake_case__=0.0 ,snake_case__=True ,snake_case__="relu" ,snake_case__=256 ,snake_case__=0.1 ,snake_case__=0.0 ,snake_case__=0.0 ,snake_case__=0.02 ,snake_case__=1.0 ,snake_case__=True ,snake_case__=False ,snake_case__="sine" ,snake_case__="resnet50" ,snake_case__=True ,snake_case__=False ,snake_case__=4 ,snake_case__=4 ,snake_case__=4 ,snake_case__=False ,snake_case__=300 ,snake_case__=False ,snake_case__=1 ,snake_case__=5 ,snake_case__=2 ,snake_case__=1 ,snake_case__=1 ,snake_case__=5 ,snake_case__=2 ,snake_case__=0.1 ,snake_case__=0.25 ,snake_case__=False ,**snake_case__ ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
SCREAMING_SNAKE_CASE_ : Tuple = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = backbone_config.get('model_type' )
SCREAMING_SNAKE_CASE_ : Tuple = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ : List[Any] = config_class.from_dict(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = use_timm_backbone
SCREAMING_SNAKE_CASE_ : Any = backbone_config
SCREAMING_SNAKE_CASE_ : List[str] = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = num_queries
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = d_model
SCREAMING_SNAKE_CASE_ : Union[str, Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE_ : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_layers
SCREAMING_SNAKE_CASE_ : int = decoder_attention_heads
SCREAMING_SNAKE_CASE_ : Any = dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE_ : Tuple = init_std
SCREAMING_SNAKE_CASE_ : Optional[int] = init_xavier_std
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE_ : str = auxiliary_loss
SCREAMING_SNAKE_CASE_ : str = position_embedding_type
SCREAMING_SNAKE_CASE_ : Optional[int] = backbone
SCREAMING_SNAKE_CASE_ : List[str] = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ : Any = dilation
# deformable attributes
SCREAMING_SNAKE_CASE_ : Optional[int] = num_feature_levels
SCREAMING_SNAKE_CASE_ : Any = encoder_n_points
SCREAMING_SNAKE_CASE_ : Dict = decoder_n_points
SCREAMING_SNAKE_CASE_ : List[str] = two_stage
SCREAMING_SNAKE_CASE_ : Optional[Any] = two_stage_num_proposals
SCREAMING_SNAKE_CASE_ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
SCREAMING_SNAKE_CASE_ : List[str] = class_cost
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox_cost
SCREAMING_SNAKE_CASE_ : Tuple = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ : Optional[Any] = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ : List[Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ : List[str] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ : Union[str, Any] = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ : List[str] = eos_coefficient
SCREAMING_SNAKE_CASE_ : Any = focal_alpha
SCREAMING_SNAKE_CASE_ : Tuple = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ ,**snake_case__ )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
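# Added illustration: the guard in __init__ means the two-stage variant must
# also enable box refinement. Creating configs needs no backbone weights, so
# this sketch runs offline.
if __name__ == "__main__":
    base = DeformableDetrConfig()  # timm ResNet-50 backbone by default
    two_stage = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    print(base.num_feature_levels, two_stage.two_stage_num_proposals)  # 4 300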
| 685 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
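# Added note: the class under test appears here as LayoutLMvaImageProcessor
# (this dump strips digits from identifiers); in released transformers the
# public name is LayoutLMv3ImageProcessor. A hedged, OCR-free usage sketch that
# needs no Tesseract install:
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutLMvaImageProcessor(apply_ocr=False)
    image = Image.new('RGB', (640, 480))
    pixel_values = processor(image, return_tensors='pt').pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 224, 224]) with the default size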
| 685 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = '</s>'
SCREAMING_SNAKE_CASE_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) ,snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<pad>' )
self.assertEqual(vocab_keys[1] ,'</s>' )
self.assertEqual(vocab_keys[-1] ,'v' )
self.assertEqual(len(snake_case__ ) ,1103 )
def snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,1103 )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : List[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE_ : Tuple = rust_tokenizer([raw_input_str] ,return_tensors=snake_case__ ,add_special_tokens=snake_case__ ).input_ids[0]
SCREAMING_SNAKE_CASE_ : Any = py_tokenizer([raw_input_str] ,return_tensors=snake_case__ ,add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE_ : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE_ : Dict = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer([raw_input_str] ,return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
SCREAMING_SNAKE_CASE_ : List[str] = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE_ : int = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
SCREAMING_SNAKE_CASE_ : str = tokenizer([raw_input_str] ,return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ ,snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ['This is going to be way too long.' * 150, 'short example']
SCREAMING_SNAKE_CASE_ : Dict = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE_ : Any = self._large_tokenizer(snake_case__ ,padding=snake_case__ ,truncation=snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : List[str] = self._large_tokenizer(
text_target=snake_case__ ,max_length=5 ,padding=snake_case__ ,truncation=snake_case__ ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def snake_case ( self ):
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,)
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[int] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE_ : str = rust_tokenizer([raw_input_str] ,return_tensors=snake_case__ ,add_special_tokens=snake_case__ ).input_ids[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = py_tokenizer([raw_input_str] ,return_tensors=snake_case__ ,add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ ,snake_case__ )
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = ['This is going to be way too long.' * 1000, 'short example']
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE_ : str = self._large_tokenizer(snake_case__ ,padding=snake_case__ ,truncation=snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Any = self._large_tokenizer(
text_target=snake_case__ ,max_length=5 ,padding=snake_case__ ,truncation=snake_case__ ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE_ : Any = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] ,)
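# Added sketch of the id layout the asserts above rely on: Pegasus reserves
# id 0 for <pad>, 1 for </s>, low ids for mask tokens, and shifts the raw
# SentencePiece ids upward by `offset`. Fetching the checkpoint needs network
# access, so this is guarded behind __main__.
if __name__ == "__main__":
    tok = PegasusTokenizer.from_pretrained('google/pegasus-large')
    assert tok.pad_token_id == 0 and tok.eos_token_id == 1
    ids = tok('To ensure a smooth flow of bank resolutions.').input_ids
    print(ids[-1])  # 1 — every encoding ends with </s>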
| 685 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                'cached_{}_{}_{}_{}'.format(
                    'dev' if evaluate else 'train',
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(F'Loading features from cached file {cached_features_file}')
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(F'Creating features from dataset file at {data_dir}')
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info('Training examples: %s', len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info('Saving features into cached file %s', cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                    if ex_index % 10000 == 0:
                        logger.info('Writing example %d of %d' % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        'example_id': tf.TensorShape([]),
                        'input_ids': tf.TensorShape([None, None]),
                        'attention_mask': tf.TensorShape([None, None]),
                        'token_type_ids': tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt')), 'train')

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt')), 'dev')

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('ex') else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts HANS examples to lists of InputFeatures."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10_000 == 0:
            logger.info('Writing example %d' % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding='max_length',
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info('*** Example ***')
        logger.info(F'guid: {example}')
        logger.info(F'features: {features[i]}')

    return features
hans_tasks_num_labels = {
    'hans': 3,
}

hans_processors = {
    'hans': HansProcessor,
}
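# Added wiring sketch (the data path and model id are placeholders): building
# the PyTorch dataset end to end from the pieces defined above. Requires the
# HANS heuristics_*_set.txt files on disk and network access for the tokenizer.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    dataset = HansDataset(
        data_dir='/path/to/hans',  # hypothetical location of the HANS tsv files
        tokenizer=tokenizer,
        task='hans',
        max_seq_length=128,
        evaluate=True,
    )
    print(len(dataset), dataset.get_labels())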
| 685 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return F'{h}:{m:02d}:{s:02d}' if h != 0 else F'{m:02d}:{s:02d}'


def html_progress_bar(value, total, prefix, label, width=300):
    "Produce the HTML for a progress bar of `value` out of `total`."
    return F'\n    <div>\n      {prefix}\n      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n      {label}\n    </div>\n    '


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F'    <th>{i}</th>\n'
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = F'{elt:.6f}' if isinstance(elt, float) else str(elt)
            html_code += F'      <td>{elt}</td>\n'
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = F'[{spaced_value}/{self.total} : < :'
        elif self.predicted_remaining is None:
            self.label = F'[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'
        else:
            self.label = (
                F'[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'
                F' {format_time(self.predicted_remaining)}'
            )
            self.label += F', {1/self.average_time_per_item:.2f} it/s'
        self.label += ']' if self.comment is None or len(self.comment) == 0 else F', {self.comment}]'
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else F'{state.epoch:.2f}'
        self.training_tracker.update(
            state.global_step + 1,
            comment=F'Epoch {epoch}/{state.num_train_epochs}',
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and 'loss' in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if 'loss' in log:
                    values['Training Loss'] = log['loss']
                    break

            if self.first_column == 'Epoch':
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(R'\_loss$', '', k)
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(F'{metric_key_prefix}_runtime', None)
            _ = metrics.pop(F'{metric_key_prefix}_samples_per_second', None)
            _ = metrics.pop(F'{metric_key_prefix}_steps_per_second', None)
            _ = metrics.pop(F'{metric_key_prefix}_jit_compilation_time', None)
            for k, v in metrics.items():
                if k == F'{metric_key_prefix}_loss':
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=F'Epoch {int(state.epoch)}/{state.num_train_epochs}',
            force_update=True,
        )
        self.training_tracker = None
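# Added demo: the bar can also be driven by hand, outside of Trainer. In a
# Jupyter cell this renders a live HTML progress bar; in plain Python the
# IPython display call just prints a placeholder.
if __name__ == "__main__":
    bar = NotebookProgressBar(100, prefix='demo')
    for step in range(1, 101):
        time.sleep(0.01)  # stand-in for real work
        bar.update(step, comment=F'step {step}')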
| 685 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size', [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('input_in_memory_max_size', ['default', 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
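# Added restatement of the rule under test: a dataset is "small" iff both
# values are truthy and dataset_size < IN_MEMORY_MAX_SIZE (0 disables the cap).
if __name__ == "__main__":
    datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # sketch: patch directly
    print(is_small_dataset(400 * 2**20))  # True
    print(is_small_dataset(600 * 2**20))  # False
    datasets.config.IN_MEMORY_MAX_SIZE = 0
    print(is_small_dataset(400 * 2**20))  # False — the cap is disabled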
| 685 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
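# Added invocation sketch: the same conversion driven from Python. The paths
# are placeholders, not real checkpoints, so this stays commented out.
#
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path='/ckpt/mobilebert/model.ckpt',
#     mobilebert_config_file='/ckpt/mobilebert/config.json',
#     pytorch_dump_path='/out/pytorch_model.bin',
# )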
| 685 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculate the built-in voltage of a pn junction (in volts).
    """
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
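# Added worked example for a symmetric silicon junction at T = 300 K
# (illustrative numbers, concentrations in cm^-3):
# V_bi = (kT/q) * ln(N_D * N_A / n_i^2).
if __name__ == "__main__":
    v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
    print(F'{v_bi:.3f} V')  # ~0.833 V, since kT/q ≈ 0.02585 V and ln(1e14) ≈ 32.2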
| 685 | 1 |
def simplify(current_set: list[list]) -> list[list]:
    """Row-reduce `current_set` one step, recursing on the trailing submatrix."""
    # Divide each row by the magnitude of its first term --> creating a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n lists of length n+1."""
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : List[Any] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 685 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')

    def __str__(self):
        return '\n'.join(F'Priority {i}: {q}' for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
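    # Hedged extra check (added for illustration): FixedPriorityQueue drains
    # priority 0 before priorities 1 and 2, regardless of insertion order.
    pq = FixedPriorityQueue()
    pq.enqueue(2, 1)
    pq.enqueue(0, 2)
    assert pq.dequeue() == 2  # the priority-0 element comes out first
    assert pq.dequeue() == 1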
| 685 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)

        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
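

# Hedged usage sketch (added for illustration, not part of the original file;
# it assumes network access and the `abeja/gpt-neox-japanese-2.7b` checkpoint
# named in the URL maps above):
#
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# ids = tokenizer("こんにちは、世界。")["input_ids"]
# print(tokenizer.decode(ids))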
| 685 |
def solution(n: int = 1000) -> int:
    """
    Return the sum of all natural numbers below n that are multiples of 3 or 5.

    >>> solution(10)
    23
    """
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
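    # Hedged check (hand-computed, added for illustration): below 20 the
    # multiples of 3 or 5 are 3, 5, 6, 9, 10, 12, 15 and 18, which sum to 78.
    assert solution(20) == 78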
| 685 | 1 |
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the extended key."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
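    # Hedged round-trip check (added for illustration): decryption inverts
    # encryption for any uppercase message, since (m - k) + k == m (mod 26).
    demo_key = generate_key("HELLO WORLD", "KEY")
    assert original_text(cipher_text("HELLO WORLD", demo_key), demo_key) == "HELLO WORLD"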
| 685 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
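

# Hedged illustration (added; `DummyObject` and `requires_backends` live in
# `..utils` and are not shown here): the intended behaviour is that merely
# instantiating one of the placeholder classes fails with an instruction to
# install flax, e.g.
#
# try:
#     lowerCAmelCase_()
# except ImportError as err:
#     print(err)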
| 685 | 1 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest number of a list of distinct numbers."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
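    # Hedged extra check (added for illustration; assumes distinct elements):
    # the median of an odd-length list is its (n // 2 + 1)-th smallest value.
    values = [7, 1, 5, 3, 9]
    assert kth_number(values, len(values) // 2 + 1) == 5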
| 685 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
with open(log, '''r''') as f:
for line in f:
            line = json.loads(line)
if line.get('''nodeid''', '''''') != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(F"""### {message}""")
else:
    message = "No failed tests! 🤗"
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
        date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
else:
                        row[0] = ""
                payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
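# Hedged note (added): this script expects pytest report-log output (one JSON
# object per line with `nodeid`, `duration` and `outcome`) in *.log files next
# to it, plus SLACK_API_TOKEN, GITHUB_REPOSITORY and GITHUB_RUN_ID in the
# environment whenever TEST_TYPE is set.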
| 685 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
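    # Hedged note (added): generating a 2048-bit prime via rabin_miller can be
    # slow; for quick experiments call make_key_files with a smaller key_size,
    # e.g. make_key_files("demo", 64).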
| 685 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of a number.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
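    # Hedged example (hand-computed, added for illustration): 1 << 10 is
    # 0b10000000000, eleven bits wide, so its highest set bit sits at
    # position 11.
    assert get_highest_set_bit_position(1 << 10) == 11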
| 685 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
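

# Hedged usage sketch (added for illustration, not part of the original file;
# it assumes the `facebook/m2m100_418M` checkpoint from the URL maps above and
# an installed sentencepiece):
#
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# encoded = tokenizer("Hello world")
# # the source text is wrapped as [__en__] ... </s> by set_src_lang_special_tokens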
| 685 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """
    Prepare a maximally entangled (GHZ-style) state on `qubits` qubits and
    return the measurement counts over 1000 shots.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
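    # Hedged note (added): for the 3-qubit GHZ state prepared above, roughly
    # half of the 1000 shots should land on '000' and half on '111'; any other
    # bitstring would indicate the qubits were not entangled.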
| 685 | 1 |