"""BigBird model configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
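# A minimal usage sketch (this assumes the `transformers` package is installed;
# the relative imports above only resolve inside the library source tree):
#
#     from transformers import BigBirdConfig, BigBirdModel
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     model = BigBirdModel(config)  # randomly initialized with this configuration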
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""PoolFormer model configuration"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
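# A minimal ONNX-config usage sketch (assumes `transformers` is installed;
# `atol_for_validation` above is the tolerance used when the exported graph is
# checked against the PyTorch model):
#
#     from transformers import PoolFormerConfig
#
#     config = PoolFormerConfig()
#     onnx_config = PoolFormerOnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
#     print(onnx_config.atol_for_validation)  # 0.002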
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds a root of `function` (a string in `variable`), starting from
    `starting_point`, using the modified Newton-Raphson update
    x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n).
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}""")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
    )

    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
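# Worked iteration sketch for f(x) = x**2 - 4 starting at x0 = 3 (hand-computed,
# values rounded):
#   x1 = 3 - 5/6                  ~ 2.1667
#   x2 = 2.1667 - 0.6944/4.3333   ~ 2.0064
# converging quadratically to the root x = 2:
#
#     print(newton_raphson("x**2 - 4", 3))  # ~2.0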
"""simple docstring"""
import math
def _snake_case ( _snake_case : int ):
lowerCAmelCase : str = [True] * n
lowerCAmelCase : List[str] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
lowerCAmelCase : List[Any] = i * 2
while index < n:
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = index + i
lowerCAmelCase : int = [2]
for i in range(3 , _snake_case , 2 ):
if is_prime[i]:
primes.append(_snake_case )
return primes
def _snake_case ( _snake_case : int = 999966663333 ):
lowerCAmelCase : Any = math.floor(math.sqrt(_snake_case ) ) + 100
lowerCAmelCase : str = prime_sieve(_snake_case )
lowerCAmelCase : List[str] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : int = primes[prime_index]
while (last_prime**2) <= limit:
lowerCAmelCase : Optional[Any] = primes[prime_index + 1]
lowerCAmelCase : int = last_prime**2
lowerCAmelCase : Any = next_prime**2
# Get numbers divisible by lps(current)
lowerCAmelCase : Optional[Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCAmelCase : int = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCAmelCase : List[str] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCAmelCase : List[str] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
"""simple docstring"""
def _snake_case ( _snake_case : int ):
if not isinstance(_snake_case , _snake_case ):
raise TypeError('''only integers accepted as input''' )
else:
lowerCAmelCase : List[str] = str(abs(_snake_case ) )
lowerCAmelCase : Optional[Any] = [list(_snake_case ) for char in range(len(_snake_case ) )]
for index in range(len(_snake_case ) ):
num_transpositions[index].pop(_snake_case )
return max(
int(''''''.join(list(_snake_case ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
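# Quick sanity checks (hand-verified): dropping one digit of 152 yields 52, 12
# or 15, so the maximum is 52; the sign is discarded via abs().
#
#     assert remove_digit(152) == 52
#     assert remove_digit(-290) == 90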
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
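# A minimal concrete-command sketch. The `HelloCommand` name and its behaviour
# are hypothetical, for illustration only; note that in practice the `parser`
# handed to `register_subcommand` is the subparsers action on which
# `add_parser` is called (despite the `ArgumentParser` annotation).
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"hello, {self._name}")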
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
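# Launch sketch (the `accelerate launch` CLI ships with the library; the script
# filename is whatever this file is saved as):
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2
#
# Inside `accelerator.accumulate(model)`, `accelerator.backward(loss)` skips the
# gradient synchronization and the wrapped optimizer skips its step until the
# configured number of micro-batches has been processed, emulating an effective
# batch size of batch_size * gradient_accumulation_steps.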
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch

from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the model cache so later timed tests don't include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1


class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word pieces) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
def present_value(discount_rate: float, cash_flows: list) -> float:
    """
    Net present value of a series of periodic cash flows, discounted at
    `discount_rate`: the sum of cash_flow_i / (1 + discount_rate) ** i.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
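# Quick sanity checks (hand-computed): with a zero rate nothing is discounted,
# and at 10% a cash flow of 110 one period out is worth 100 today.
#
#     assert present_value(0.0, [10, 10]) == 20.0
#     assert present_value(0.1, [0, 110]) == 100.0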
from __future__ import annotations

from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """
    Counts the ways `needed_sum` can be expressed as a sum of `power`-th powers
    of distinct natural numbers.
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
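# Quick sanity check (hand-verified): the only way to write 13 as a sum of
# distinct squares is 2**2 + 3**2, so:
#
#     assert solve(13, 2) == 1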
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that loading a tokenizer saved in an older, incompatible format raises an error
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
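# This deprecation-shim pattern keeps the old import path working while steering
# users toward the new class; instantiating the old name still yields a fully
# functional processor. A minimal sketch:
#
#     extractor = ImageGPTFeatureExtractor()          # emits a FutureWarning
#     isinstance(extractor, ImageGPTImageProcessor)   # True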
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ) ->List[Any]:
"""simple docstring"""
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
if split_mlp_wi:
a_ = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
a_ = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
a_ = (wi_a, wi_a)
else:
a_ = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
a_ = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def UpperCamelCase ( UpperCAmelCase , *, UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = traverse_util.flatten_dict(variables["target"] )
a_ = {"/".join(UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
a_ = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , UpperCAmelCase )
a_ = collections.OrderedDict()
# Shared embeddings.
a_ = old["token_embedder/embedding"]
# Encoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_attention_layer_norm" )
a_ , a_ , a_ , a_ = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "attention" )
a_ = layer_norm
a_ = k.T
a_ = o.T
a_ = q.T
a_ = v.T
# Block i, layer 1 (MLP).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_mlp_layer_norm" )
a_ , a_ = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , UpperCAmelCase )
a_ = layer_norm
if split_mlp_wi:
a_ = wi[0].T
a_ = wi[1].T
else:
a_ = wi.T
a_ = wo.T
a_ = old[
"encoder/relpos_bias/rel_embedding"
].T
a_ = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_self_attention_layer_norm" )
a_ , a_ , a_ , a_ = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "self_attention" )
a_ = layer_norm
a_ = k.T
a_ = o.T
a_ = q.T
a_ = v.T
# Block i, layer 1 (Cross Attention).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_cross_attention_layer_norm" )
a_ , a_ , a_ , a_ = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "encoder_decoder_attention" )
a_ = layer_norm
a_ = k.T
a_ = o.T
a_ = q.T
a_ = v.T
# Block i, layer 2 (MLP).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_mlp_layer_norm" )
a_ , a_ = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , UpperCAmelCase )
a_ = layer_norm
if split_mlp_wi:
a_ = wi[0].T
a_ = wi[1].T
else:
a_ = wi.T
a_ = wo.T
a_ = old["decoder/decoder_norm/scale"]
a_ = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
a_ = old["decoder/logits_dense/kernel"].T
return new
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
a_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
a_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
a_ = state_dict["shared.weight"]
return state_dict
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = checkpoints.load_tax_checkpoint(UpperCAmelCase )
a_ = convert_tax_to_pytorch(UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase )
a_ = make_state_dict(UpperCAmelCase , UpperCAmelCase )
model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) ->int:
"""simple docstring"""
a_ = TaConfig.from_json_file(UpperCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
a_ = TaEncoderModel(UpperCAmelCase )
else:
a_ = TaForConditionalGeneration(UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCAmelCase )
print("Done" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
UpperCamelCase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 360 |
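# Invocation sketch (flags as defined by the argparse setup above; the script
# filename and all paths are placeholders):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output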
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1E-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 303 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high+1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort: merge runs of width 2, 4, 8, ..."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
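# Worked example: iter_merge_sort([9, 1, 5, 3]) first merges the width-2 runs
# (9,1) and (5,3), giving [1, 9, 3, 5], then the final pass merges the two
# halves into [1, 3, 5, 9].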
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 51 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1):
        """simple docstring"""
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-single""", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )
    def save_results_as_csv(self, job_name):
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
    def test_glue(self):
        """simple docstring"""
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE) | 269 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 173 |
def solution(length: int = 50) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
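    # different_colour_ways_number[n][t - 2] counts the tilings of a row of
    # length n that use at least one tile of length t (t = 2, 3, 4): each
    # placement either stands alone (the +1) or is followed by an
    # already-counted arrangement of the remaining suffix. This appears to be
    # Project Euler problem 116.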
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 173 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1024,
}
_snake_case = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
_snake_case = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''') for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
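    # The current target-language token (e.g. <lang:fr>) lives in prefix_tokens;
    # build_inputs_with_special_tokens prepends it, so reassigning tgt_lang
    # re-targets generation without rebuilding the tokenizer.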
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str):
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 26 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 315 | 0 |
def solution(length: int = 50):
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""") | 256 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 256 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
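# BlipTextModelTester above centralizes the synthetic config and inputs used by
# the test case below; the attention masks it builds are contiguous prefixes of
# ones, mimicking right-padded sequences.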
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass
    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 137 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            F'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path
def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
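# Usage sketch (hypothetical path): the helpers compose, e.g.
#   mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))
# prints a JSON-friendly view in which classes are rendered by name via to_json.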
| 137 | 1 |
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph: list, s: int, t: int, parent: list) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
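# mincut below runs Ford-Fulkerson with the BFS above (Edmonds-Karp): it
# saturates augmenting paths until none remain, then reports every edge whose
# residual capacity dropped to zero, i.e. the saturated edges crossing the cut.
# For the capacity matrix above, mincut(test_graph, source=0, sink=5) yields
# [(1, 3), (4, 3), (4, 5)].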
def mincut(graph: list, source: int, sink: int) -> list:
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 356 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "tapas"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
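        # JSON serializes dict keys as strings, so aggregation labels loaded
        # from a config file arrive as {"0": ...}; the int() coercion above
        # restores integer keys.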
| 133 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list)}, but is'''
                    f''' {type(additional_special_tokens)}''')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
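    # PEGASUS reserves ids 0 (pad) and 1 (eos); the next ids cover the two mask
    # tokens plus the <unk_2>..<unk_102> pretraining placeholders, and real
    # sentencepiece ids are shifted up by `offset` (see _convert_token_to_id).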
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
            return token
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False) -> int:
        """Just EOS"""
        return 1
    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 9 |
def solution(power: int = 1000) -> int:
'''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
return r
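# Example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.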
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 133 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head'):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone'):
            key = key.replace('backbone', 'segformer.encoder')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm') + len('segformer.encoder.layer_norm')]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith('head'):
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
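# HF SegFormer keeps separate key/value projections, while the original
# checkpoint fuses them into one `kv` matrix; the split above carves the fused
# weight into the first hidden_sizes[i] rows (key) and the remainder (value).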
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    if "segformer" in model_name:
        size = model_name[len('segformer.') : len('segformer.') + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = 'ade20k-id2label.json'
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = 'cityscapes-id2label.json'
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1_000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1_000)
    else:
        raise ValueError(f"Model {model_name} not supported")
# set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
snake_case_ : Union[str, Any] = [64, 128, 320, 512]
snake_case_ : Union[str, Any] = 256
elif size == "b2":
snake_case_ : Union[str, Any] = [64, 128, 320, 512]
snake_case_ : Union[str, Any] = 768
snake_case_ : Union[str, Any] = [3, 4, 6, 3]
elif size == "b3":
snake_case_ : str = [64, 128, 320, 512]
snake_case_ : Union[str, Any] = 768
snake_case_ : Optional[Any] = [3, 4, 18, 3]
elif size == "b4":
snake_case_ : int = [64, 128, 320, 512]
snake_case_ : Optional[int] = 768
snake_case_ : Optional[Any] = [3, 8, 27, 3]
elif size == "b5":
snake_case_ : Dict = [64, 128, 320, 512]
snake_case_ : Tuple = 768
snake_case_ : Optional[Any] = [3, 6, 40, 3]
else:
raise ValueError(f"Size {size} not supported" )
# load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
logger.info(f"Converting model {model_name}..." )
# load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))['state_dict']
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
model.eval()
# forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
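# A minimal invocation sketch (the script filename, checkpoint file and output folder
# below are illustrative assumptions, not part of the original code):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-converted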
| 155 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )
        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}]
        )
        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
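    # A sketch of the same stop-sequence behaviour outside a test (model name reused
    # from the test above; the exact truncation internals vary by transformers version):
    #   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    #   generator("Hello I believe in", stop_sequence=" fe")
    #   # -> [{"generated_text": "Hello I believe in fe"}]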
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which will always append EOS, so it will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite-range models; they already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work when more new tokens are requested than the context can ever hold
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
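    # Note on `handle_long_generation="hole"`, summarized from the calls above (the
    # exact internals are version-dependent): when the prompt exceeds the model's
    # context window, the pipeline keeps only the rightmost prompt tokens, leaving a
    # "hole" of `max_new_tokens` free positions. That is why requesting more new
    # tokens than `tokenizer.model_max_length` can ever hold must raise a ValueError.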
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
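    # Preferred modern usage is `max_new_tokens` on its own (a sketch, not part of the test):
    #   text_generator("Hello world", max_new_tokens=1)   # no warning
    #   text_generator("Hello world", max_length=10)      # legacy form, still accepted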
| 155 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
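# Background for the assertion above (an explanatory sketch, not part of the original
# file): rougeLsum is computed sentence by sentence, so the scorer expects "\n"
# between sentences. With newline_sep=True the helper inserts them; without it a
# multi-sentence summary is scored as one long sentence and the score drops.
# Directly with the underlying rouge_score package:
#   from rouge_score import rouge_scorer
#   scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
#   scorer.score("First sentence.\nSecond sentence.", "First sentence.\nSecond one.")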
def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline() -> None:
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 53 |
from math import sqrt
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
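# Quick sanity check of the trial-division test above:
#   >>> [n for n in range(2, 20) if is_prime(n)]
#   [2, 3, 5, 7, 11, 13, 17, 19]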
def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"
    ans = []
    # iterates over all numbers from 2 up to n;
    # if a number is prime it is appended to the list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0
def goldbach(number):
    """Returns two prime numbers whose sum equals the even input 'number'."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, for breaking out of the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
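# Worked example: the primes up to 28 are scanned in ascending order and the first
# pair summing to 28 is returned:
#   >>> goldbach(28)
#   [5, 23]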
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1


def kg_v(number1, number2):
    """Least common multiple (kgV) of the two arguments."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV(x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans


def get_prime(n):
    """Returns the n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"
    return ans
def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
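# Note the indexing convention above: fib(0) == fib(1) == 1, so fib(n) is the
# (n + 1)-th term of the usual 1, 1, 2, 3, 5, 8, ... sequence:
#   >>> fib(5)
#   8
#   >>> factorial(5)
#   120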
| 20 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
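    # The same round-trip outside a test, as a sketch (model id reused from above):
    #   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    #   model = model.to_bettertransformer()       # swap in BetterTransformer modules
    #   model = model.reverse_bettertransformer()  # required before save_pretrained()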
| 243 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL Image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()

    # first pass: compute the mean gray level of the whole image
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    # second pass: binarize every pixel against the mean
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 243 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_lowerCAmelCase :Union[str, Any] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
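# Worked example with the default scale_factor of 8: 768 // 8**2 == 12 with no
# remainder, and 12 * 8 == 96, so a 768x768 request maps to a 96x96 latent grid:
#   >>> downscale_height_and_width(768, 768)
#   (96, 96)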
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Image-to-image generation pipeline for Kandinsky 2.2 (UNet decoder + MoVQ).
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def __lowerCAmelCase ( self , A , A , A , A , A , A , A=None ) -> int:
if not isinstance(__UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__UpperCAmelCase )}' )
_UpperCAmelCase : int = image.to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
_UpperCAmelCase : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_UpperCAmelCase : Union[str, Any] = image
else:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_UpperCAmelCase : Union[str, Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCAmelCase )
]
_UpperCAmelCase : Union[str, Any] = torch.cat(__UpperCAmelCase , dim=0 )
else:
_UpperCAmelCase : Optional[Any] = self.movq.encode(__UpperCAmelCase ).latent_dist.sample(__UpperCAmelCase )
_UpperCAmelCase : str = self.movq.config.scaling_factor * init_latents
_UpperCAmelCase : List[str] = torch.cat([init_latents] , dim=0 )
_UpperCAmelCase : int = init_latents.shape
_UpperCAmelCase : List[Any] = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase )
# get latents
_UpperCAmelCase : Optional[Any] = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = init_latents
return latents
def __lowerCAmelCase ( self , A=0 ) -> Dict:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_UpperCAmelCase : Dict = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self , A=0 ) -> str:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_UpperCAmelCase : int = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Tuple = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase )
# We'll offload the last model manually.
_UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self ) -> Union[str, Any]:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCAmelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCAmelCase )
def __call__( self , A , A , A , A = 5_1_2 , A = 5_1_2 , A = 1_0_0 , A = 4.0 , A = 0.3 , A = 1 , A = None , A = "pil" , A = True , ) -> str:
_UpperCAmelCase : Union[str, Any] = self._execution_device
_UpperCAmelCase : str = guidance_scale > 1.0
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_UpperCAmelCase : Any = torch.cat(__UpperCAmelCase , dim=0 )
_UpperCAmelCase : Tuple = image_embeds.shape[0]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_UpperCAmelCase : Union[str, Any] = torch.cat(__UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase : Tuple = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
_UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
_UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCAmelCase )
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_UpperCAmelCase : Optional[Any] = [image]
if not all(isinstance(__UpperCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'Input is in incorrect format: {[type(__UpperCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
_UpperCAmelCase : Union[str, Any] = torch.cat([prepare_image(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for i in image] , dim=0 )
_UpperCAmelCase : Tuple = image.to(dtype=image_embeds.dtype , device=__UpperCAmelCase )
_UpperCAmelCase : str = self.movq.encode(__UpperCAmelCase )['''latents''']
_UpperCAmelCase : int = latents.repeat_interleave(__UpperCAmelCase , dim=0 )
self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Dict = self.get_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_UpperCAmelCase , _UpperCAmelCase : str = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor )
_UpperCAmelCase : Optional[Any] = self.prepare_latents(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase )
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Any = {'''image_embeds''': image_embeds}
_UpperCAmelCase : Any = self.unet(
sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Dict = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : Any = variance_pred.chunk(2 )
_UpperCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : Any = self.scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0]
# post-processing
_UpperCAmelCase : Union[str, Any] = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Optional[Any] = image * 0.5 + 0.5
_UpperCAmelCase : int = image.clamp(0 , 1 )
_UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : Dict = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 263 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 320 | 0 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
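    # The two comments above name compositions this demo does not implement; a small
    # illustrative sketch of max-min composition (R1 on X x Y, R2 on Y x Z; the
    # arrays are made-up examples, not part of the original script):
    #   R1 = np.array([[0.2, 0.8], [0.6, 0.4]])
    #   R2 = np.array([[0.5, 0.9], [0.7, 0.3]])
    #   max_min = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)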
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
    plt.show()
| 275 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
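# `preprocess` accepts a PIL image, a list of PIL images, or an already-built tensor,
# and returns a normalized (N, 3, 256, 256) float tensor in [-1, 1]. A usage sketch
# ("init.png" is a hypothetical path):
#   batch = preprocess(PIL.Image.open("init.png"))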
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength) -> None:
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        init_latents = image
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
@torch.no_grad()
def __call__( self : Tuple , _A : Union[torch.FloatTensor, PIL.Image.Image] = None , _A : float = 0.8 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : float = 0.0 , _A : int = 50 , _A : Optional[bool] = None , _A : Optional[str] = "pil" , _A : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(_A )
# 2. Preprocess image
__magic_name__ : int = preprocess(_A )
# 3. set timesteps
self.scheduler.set_timesteps(_A , device=self.device )
__magic_name__ , __magic_name__ : Dict = self.get_timesteps(_A , _A , self.device )
__magic_name__ : Dict = timesteps[:1].repeat(_A )
# 4. Prepare latent variables
__magic_name__ : Optional[Any] = self.prepare_latents(_A , _A , _A , self.unet.dtype , self.device , _A )
__magic_name__ : Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(_A ):
# 1. predict noise model_output
__magic_name__ : Dict = self.unet(_A , _A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__magic_name__ : List[Any] = self.scheduler.step(
_A , _A , _A , eta=_A , use_clipped_model_output=_A , generator=_A , ).prev_sample
__magic_name__ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : Dict = self.numpy_to_pil(_A )
if not return_dict:
return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
| 275 | 1 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """
    BenchmarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
    """
    models: List[str] = list_field(
        default=[], metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        }, )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
    inference: bool = field(
        default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
    cuda: bool = field(
        default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
    memory: bool = field(
        default=True, metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        }, )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True, metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        }, )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time() )}.csv", metadata={"help": "CSV filename used if saving time results to csv."}, )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time() )}.csv", metadata={"help": "CSV filename used if saving memory results to csv."}, )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time() )}.csv", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time() )}.csv", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time() )}.csv", metadata={"help": "CSV filename used if saving environment information."}, )
    log_filename: str = field(
        default=f"log_{round(time() )}.csv", metadata={"help": "Log filename used if print statements are saved in log."}, )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False, metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        }, )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.", FutureWarning, )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].")
        return self.models
@property
def _lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True | 145 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : CLIPSegForImageSegmentation , lowerCAmelCase__ : CLIPSegProcessor , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase__ : StableDiffusionSafetyChecker , lowerCAmelCase__ : CLIPImageProcessor , ) -> Dict:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
_UpperCAmelCase : str = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
_UpperCAmelCase : Any = dict(scheduler.config )
_UpperCAmelCase : Tuple = 1
_UpperCAmelCase : Optional[Any] = FrozenDict(lowerCAmelCase__ )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
_UpperCAmelCase : Union[str, Any] = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = dict(scheduler.config )
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Dict = FrozenDict(lowerCAmelCase__ )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowerCAmelCase__ , segmentation_processor=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Optional[int]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCAmelCase : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase : Dict = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase__ , lowerCAmelCase__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[int] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase__ : str , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : Any , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
_UpperCAmelCase : List[Any] = self.segmentation_model(**lowerCAmelCase__ )
_UpperCAmelCase : Any = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_UpperCAmelCase : str = self.numpy_to_pil(lowerCAmelCase__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_UpperCAmelCase : Union[str, Any] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , ) | 145 | 1 |
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits : int , classical_bits : int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
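# A minimal companion sketch (not part of the original file): the same
# measure-and-count pattern on a Bell state. It assumes a pre-1.0 qiskit where
# `qiskit.Aer` and `qiskit.execute` are still available, like the code above.
def bell_state_measure() -> dict:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)  # put qubit 0 into an equal superposition
    circuit.cx(0, 1)  # entangle qubit 1 with qubit 0
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Expect roughly {'00': ~500, '11': ~500}
    return job.result().get_counts(circuit)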
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(F'''Total count for various states are: {counts}''')
| 367 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : int = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A : List[Any] = {
"unc-nlp/lxmert-base-uncased": 512,
}
__A : Optional[Any] = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = LxmertTokenizer
def __init__( self : Dict , lowerCamelCase : Tuple=None , lowerCamelCase : List[str]=None , lowerCamelCase : Dict=True , lowerCamelCase : List[Any]="[UNK]" , lowerCamelCase : Tuple="[SEP]" , lowerCamelCase : str="[PAD]" , lowerCamelCase : Any="[CLS]" , lowerCamelCase : str="[MASK]" , lowerCamelCase : Dict=True , lowerCamelCase : Union[str, Any]=None , **lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , )
lowerCAmelCase_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Tuple = getattr(lowerCamelCase , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ : Optional[Any] = do_lower_case
lowerCAmelCase_ : Dict = strip_accents
lowerCAmelCase_ : List[Any] = tokenize_chinese_chars
lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCamelCase )
lowerCAmelCase_ : Tuple = do_lower_case
def __lowercase ( self : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[Any]=None ) -> int:
lowerCAmelCase_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase_ : int = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
| 89 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url( repo_id: str , path: str , revision: str ) -> None:
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
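# For reference, one concrete case the parametrization above covers; `quote`
# percent-encodes the blank in the filename:
#
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"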
| 329 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args: str , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ) -> Union[str, Any]:
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
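# Usage sketch for `deprecate` (hypothetical argument names; "99.0.0" keeps the
# removal version in the future so the ValueError branch is not hit):
def resize(height=None, **kwargs):
    old_height = deprecate("h", "99.0.0", "Pass `height` instead.", take_from=kwargs)
    # If the caller used the deprecated `h` kwarg, it was popped from `kwargs`,
    # a FutureWarning was emitted, and its value is returned here.
    return height if height is not None else old_height

# resize(h=256) warns and returns 256; resize(height=128) is silent.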
| 329 | 1 |
'''simple docstring'''
def bubble_sort( list_data : list , length : int = 0 ) -> list:
    """simple docstring"""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
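# Quick sanity checks for the recursive bubble sort above:
#     bubble_sort([0, 5, 2, 3, 2])  ->  [0, 2, 2, 3, 5]
#     bubble_sort([-2, -45, -5])    ->  [-45, -5, -2]
#     bubble_sort([])               ->  []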
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutxlm"""] = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutxlm_fast"""] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
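# Net effect of the lazy structure above, sketched: nothing heavy is imported
# until an attribute is first touched, and names whose optional dependency
# (sentencepiece / tokenizers) is missing are simply absent instead of failing
# at import time.
#
#   from transformers.models.layoutxlm import LayoutXLMProcessor  # triggers the real import here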
| 345 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__a : Any = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 210 | from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None ):
"""simple docstring"""
if attention_mask is None:
__lowercase = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _UpperCamelCase :
"""simple docstring"""
__a : Tuple = OPTConfig
__a : int = {}
__a : Dict = '''gelu'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=20 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=16 , lowerCAmelCase__=16 , ) -> Tuple:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = embed_dim
__lowercase = word_embed_proj_dim
__lowercase = False
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase__ , **self.config_updates , )
__lowercase = prepare_opt_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = TFOPTModel(config=lowerCAmelCase__ )
__lowercase = inputs_dict['''input_ids''']
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict['''attention_mask'''][:1, :]
__lowercase = 1
# first forward pass
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__lowercase , __lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowercase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase = output_from_no_past[:, -3:, random_slice_idx]
__lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
@require_tf
class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : int = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a : Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a : Dict = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a : List[str] = False
__a : Optional[Any] = False
__a : Union[str, Any] = False
__a : List[Any] = 10
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = TFOPTModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase__ , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowercase = model_class(config=lowerCAmelCase__ )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings() )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase__ )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings() )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowercase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase__ )
# check that weights remain the same after resizing
__lowercase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowercase = False
self.assertTrue(lowerCAmelCase__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase__ )
__lowercase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowercase = False
self.assertTrue(lowerCAmelCase__ )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
return tf.constant(lowercase , dtype=tf.intaa )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
__a : List[str] = 99
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowercase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowercase = input_ids.shape[0]
__lowercase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowercase = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__lowercase = tf.not_equal(lowerCAmelCase__ , model.config.pad_token_id )
with tf.GradientTape():
__lowercase = model(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ).last_hidden_state
__lowercase = (1, 11, 5_12)
self.assertEqual(output.shape , lowerCAmelCase__ )
__lowercase = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-3 ) )
__lowercase = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__ )
__lowercase = xla_generate(lowerCAmelCase__ , lowerCAmelCase__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-2 ) )
@require_tf
@slow
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
super().setUp()
__lowercase = '''facebook/opt-350m'''
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowercase = GPTaTokenizer.from_pretrained(self.path_model )
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__lowercase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowercase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) )
__lowercase = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__ )
__lowercase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) )
@require_tf
@slow
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''facebook/opt-125m'''
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase = []
__lowercase = GPTaTokenizer.from_pretrained(lowerCAmelCase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
for prompt in self.prompts:
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(lowerCAmelCase__ , max_length=10 )
__lowercase = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = '''facebook/opt-350m'''
__lowercase = GPTaTokenizer.from_pretrained(lowerCAmelCase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
__lowercase = '''left'''
# use different length sentences to test batching
__lowercase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' , padding=lowerCAmelCase__ )
__lowercase = inputs['''input_ids''']
__lowercase = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs['''attention_mask'''] )
__lowercase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(input_ids=lowerCAmelCase__ )
__lowercase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowercase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(input_ids=lowerCAmelCase__ , max_length=model.config.max_length - num_paddings )
__lowercase = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__lowercase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
__lowercase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
__lowercase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = '''facebook/opt-350m'''
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase = []
__lowercase = GPTaTokenizer.from_pretrained(lowerCAmelCase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
for prompt in self.prompts:
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(lowerCAmelCase__ , max_length=10 )
__lowercase = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) | 210 | 1 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
SCREAMING_SNAKE_CASE__ = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
SCREAMING_SNAKE_CASE__ = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
SCREAMING_SNAKE_CASE__ = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def A__ ( self ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if self.config_name == "default":
UpperCamelCase = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
UpperCamelCase = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
if gpus is None:
UpperCamelCase = 1 if torch.cuda.is_available() else 0
UpperCamelCase = {"""src""": sources, """mt""": predictions, """ref""": references}
UpperCamelCase = [dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for t in zip(*data.values() )]
UpperCamelCase ,UpperCamelCase = self.scorer.predict(_SCREAMING_SNAKE_CASE , gpus=_SCREAMING_SNAKE_CASE , progress_bar=_SCREAMING_SNAKE_CASE )
return {"mean_score": mean_score, "scores": scores}
| 359 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word: str )-> str:
    return "".join(sorted(word ) )


def anagram( my_word: str )-> list[str]:
    return word_by_signature[signature(my_word )]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
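# Usage sketch, assuming words.txt holds common English words:
#     signature("pots")  ->  "opst"
#     anagram("pots")    ->  ["opts", "post", "pots", "spot", "stop", "tops"] (say)
# i.e. every word sharing the sorted-letter signature, including the word itself.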
| 183 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path ):
    """simple docstring"""
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
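# Example invocation (hypothetical script name, placeholder paths):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert-pt/pytorch_model.bin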
| 342 |
def longest_distance( graph ):
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
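# Worked example: in the DAG above the longest path is 0 -> 2 -> 5 -> 6 -> 7,
# i.e. 5 vertices, so `longest_distance(graph)` prints 5. The traversal is
# Kahn's topological sort, relaxing `long_dist` as each vertex is dequeued.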
| 342 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
ETAOIN = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count( message : str ) -> dict:
    """simple docstring"""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero( x : tuple ) -> str:
    """simple docstring"""
    return x[0]


def get_frequency_order( message : str ) -> str:
    """simple docstring"""
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True )
        freq_to_letter_str[freq] = ''''''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True )
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )


def english_freq_match_score( message : str ) -> int:
    """simple docstring"""
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
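# Usage sketch: rank a text's letters and score the ranking against English
# (ETAOIN...); the maximum score is 12 (6 most common + 6 least common).
# A long English plaintext scores high, a substitution ciphertext of it low,
# which is the usual break condition in a simple-substitution solver.
#
#     order = get_frequency_order(intercepted_message)      # ETAOIN-like ranking string
#     if english_freq_match_score(candidate_plaintext) >= 10:
#         ...  # likely English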
if __name__ == "__main__":
import doctest
doctest.testmod() | 153 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = tempfile.mkdtemp()
_a = 8
# DPR tok
_a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_a = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_a = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _UpperCAmelCase ( self ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _UpperCAmelCase ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> str:
_a = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_a = dataset
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> int:
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_a = os.path.join(self.tmpdirname , '''dataset''' )
_a = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def _UpperCAmelCase ( self ) -> int:
_a = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_a = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_a = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_a = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _UpperCAmelCase ( self ) -> int:
_a = 1
_a = self.get_dummy_canonical_hf_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_a = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCAmelCase ( self ) -> Dict:
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> int:
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCAmelCase ( self ) -> Any:
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCAmelCase ( self ) -> List[str]:
_a = 1
_a = self.get_dummy_legacy_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
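    # For reference, a minimal sketch of a single retriever call as exercised by
    # the tests above (values mirror the dummy fixtures; not an extra test):
    #
    #   out = retriever(question_input_ids, hidden_states, prefix="", n_docs=1)
    #   context_input_ids = out["context_input_ids"]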
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        # the retriever output should consist of 6 attributes, including the tokenized docs
        self.assertEqual(len(out), 6)
        # check for the doc-token related keys in the output dictionary
        self.assertEqual(all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)
| 153 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process gets one extra element so that the other processes
    # actually have to be padded up to its length.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now, this test only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now, this test only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
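# A minimal launch sketch (the filename test_ops.py is illustrative, not from
# the source); every operation above is collective, so all processes must
# reach the same call in the same order:
#
#   accelerate launch --num_processes 2 test_ops.py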
| 168 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
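# A minimal usage sketch (loading requires network access, so this is shown as
# comments rather than executed here):
#
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   enc = tokenizer("a query", "a passage")
#   # enc["token_type_ids"] marks the query span with 0s and the passage span with 1s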
| 168 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
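# A minimal denoising-loop sketch mirroring full_loop above (the model call is
# a placeholder for any noise-prediction network):
#
#   scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)                       # predicted noise
#       sample = scheduler.step(residual, t, sample, 0.0).prev_sample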
| 369 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 249 | 0 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns number + 2 if both `number` and `number + 2` are prime,
    and -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
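# Illustrative checks (5 and 7 are twin primes; 7 and 9 are not):
#
#   twin_prime(5)  # -> 7
#   twin_prime(7)  # -> -1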
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out members whose paths or link targets would escape the
        # extraction directory (tar path-traversal protection).

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
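# A minimal usage sketch of the dispatcher above (paths are illustrative):
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#   if fmt:
#       Extractor.extract("archive.tar.gz", "extracted_file", extractor_format=fmt)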
| 88 | 1 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Prints a maximum-size set of mutually compatible activities, assuming the
    activities are sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider the rest of the activities
    for j in range(n):
        # If this activity has a start time greater than or equal to the finish
        # time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
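# Worked example for the data above: the greedy scan selects activities 0, 1, 3
# and 4, because finish[0]=2 <= start[1]=3, finish[1]=4 <= start[3]=5 and
# finish[3]=7 <= start[4]=8.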
| 227 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps: int,
    max_beta: float = 0.999,
    alpha_transform_type: str = "cosine",
):
    """
    Creates a beta schedule that discretizes the given alpha_bar function, which
    defines the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
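# A minimal inversion-loop sketch (unet and latents are placeholders; the
# scheduler walks timesteps forward, mapping an image toward noise):
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample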
| 227 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
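# A minimal usage sketch (downloads model weights on first use, so shown as
# comments rather than executed here):
#
#   tool = TextToSpeechTool()
#   tool.setup()
#   audio = tool("hello world")  # waveform tensor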
| 40 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [
                t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))
            ]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = sd_pipe.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = sd_pipe(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = sd_pipe(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
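    # The shard/replicate pattern used throughout these tests is the standard
    # JAX data-parallel recipe; a condensed sketch:
    #
    #   params = replicate(params)                      # copy weights to every device
    #   seeds = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
    #   prompt_ids = shard(prompt_ids)                  # split the batch across devices
    #   images = pipeline(prompt_ids, params, seeds, jit=True).images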
| 23 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """
    Gives the Euclidean distance between points a and b.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm: picks the k training points
    nearest to the input point and returns their most common class.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
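# Illustrative check for euclidean_distance (a 3-4-5 right triangle):
#
#   euclidean_distance([0, 0], [3, 4])  # -> 5.0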
| 363 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
    """
    Factory function used to instantiate the ConvertCommand from parsed CLI arguments.
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the datasets-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
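# --- illustrative sketch (editor addition; not part of convert.py) ---
# The run() method above leans on module-level constants (TO_CONVERT, TO_HIGHLIGHT,
# HIGHLIGHT_MESSAGE_PRE/POST) defined elsewhere in datasets/commands/convert.py.
# The core rewriting idea is an ordered list of (regex, replacement) pairs applied
# with re.sub; the two patterns below are assumptions for demonstration only,
# not the real conversion table.
def _demo_line_conversion():
    import re

    to_convert = [
        (r"tfds\.core\.GeneratorBasedBuilder", "datasets.GeneratorBasedBuilder"),
        (r"tfds\.features", "datasets.features"),
    ]
    line = "class Squad(tfds.core.GeneratorBasedBuilder):\n"
    for pattern, replacement in to_convert:
        line = re.sub(pattern, replacement, line)
    return line  # -> "class Squad(datasets.GeneratorBasedBuilder):\n"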
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
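

# Usage sketch (editor addition): the re-exports above give callers a single
# import site for progress-bar control; a minimal self-check looks like this.
def _demo_progress_bar_toggle():
    disable_progress_bar()
    assert not is_progress_bar_enabled()
    enable_progress_bar()
    assert is_progress_bar_enabled()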
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _SCREAMING_SNAKE_CASE ( ) ->Dict:
'''simple docstring'''
a : Any = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowercase )
a : List[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_lowercase )
env_command_parser(subparsers=_lowercase )
launch_command_parser(subparsers=_lowercase )
tpu_command_parser(subparsers=_lowercase )
test_command_parser(subparsers=_lowercase )
# Let's go
a : List[Any] = parser.parse_args()
if not hasattr(_lowercase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowercase )
if __name__ == "__main__":
main()
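
# Usage sketch (editor addition): the dispatcher above is what runs when the
# console script is invoked, e.g.:
#
#   accelerate env
#   accelerate launch --num_processes 2 train.py
#
# Programmatically, the same path can be exercised by setting sys.argv to
# ["accelerate", "env"] before calling main().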
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Project Euler 71: numerator of the fraction immediately left of numerator/denominator."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
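    # Editor addition: Project Euler 71's statement gives 2/5 as the fraction
    # immediately to the left of 3/7 when denominators are capped at 8, so this
    # cheap check guards the search logic above.
    assert solution(numerator=3, denominator=7, limit=8) == 2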
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# Load configuration defined in the metadata file
with open(lowerCAmelCase__ ) as metadata_file:
lowercase = json.load(lowerCAmelCase__ )
lowercase = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
lowercase = load_original_entity_vocab(lowerCAmelCase__ )
# add an entry for [MASK2]
lowercase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowercase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase = AddedToken('''<ent>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
lowercase = AddedToken('''<ent2>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''r''' ) as f:
lowercase = json.load(lowerCAmelCase__ )
lowercase = '''MLukeTokenizer'''
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
# Initialize the embeddings of the special tokens
lowercase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
lowercase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
lowercase = state_dict['''embeddings.word_embeddings.weight''']
lowercase = word_emb[ent_init_index].unsqueeze(0 )
lowercase = word_emb[enta_init_index].unsqueeze(0 )
lowercase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowercase = state_dict[bias_name]
lowercase = decoder_bias[ent_init_index].unsqueeze(0 )
lowercase = decoder_bias[enta_init_index].unsqueeze(0 )
lowercase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase = f'encoder.layer.{layer_index}.attention.self.'
lowercase = state_dict[prefix + matrix_name]
lowercase = state_dict[prefix + matrix_name]
lowercase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase = state_dict['''entity_embeddings.entity_embeddings.weight''']
lowercase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
lowercase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowercase = state_dict['''entity_predictions.bias''']
lowercase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
lowercase = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowercase = LukeForMaskedLM(config=lowerCAmelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
lowercase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
lowercase = state_dict[key]
else:
lowercase = state_dict[key]
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
if set(lowerCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowerCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowercase = MLukeTokenizer.from_pretrained(lowerCAmelCase__ , task='''entity_classification''' )
lowercase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
lowercase = (0, 9)
lowercase = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' )
lowercase = model(**lowerCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase = torch.Size((1, 33, 768) )
lowercase = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase = torch.Size((1, 1, 768) )
lowercase = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
lowercase = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
lowercase = '''Tokyo is the capital of <mask>.'''
lowercase = (24, 30)
lowercase = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' )
lowercase = model(**lowerCAmelCase__ )
lowercase = encoding['''input_ids'''][0].tolist()
lowercase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
lowercase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase__ )
lowercase = outputs.entity_logits[0][0].argmax().item()
lowercase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowerCAmelCase__ ) )
model.save_pretrained(lowerCAmelCase__ )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            entity_name = f"{language}:{entity_name}"
            new_mapping[entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
lowercase__ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowercase__ :int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
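# Example invocation sketch (editor addition; every path and the script name
# below are placeholders, not files that ship with the repo):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted-mluke \
#       --model_size base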
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
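
# --- illustrative sketch (editor addition) ---
# The denoising pattern the tests above exercise, outside the test harness. The
# "model" here is a zero stand-in, not a real UNet, so the output is not an image.
def _euler_loop_sketch():
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample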
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = (KDPMaDiscreteScheduler,)
snake_case_ = 10
def lowercase_ ( self , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = {
'num_train_timesteps': 1_100,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
if torch_device == "mps":
return
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter.to(lowerCamelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
if str(lowerCamelCase__ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """The output of `PriorTransformer`: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = num_attention_heads * attention_head_dim
__lowerCamelCase = additional_embeddings
__lowerCamelCase = time_embed_dim or inner_dim
__lowerCamelCase = embedding_proj_dim or embedding_dim
__lowerCamelCase = clip_embed_dim or embedding_dim
__lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
__lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
__lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
__lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
__lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
__lowerCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
__lowerCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int:
'''simple docstring'''
__lowerCamelCase = hidden_states.shape[0]
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
__lowerCamelCase = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
__lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ )
__lowerCamelCase = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
__lowerCamelCase = self.proj_in(lowerCamelCase__ )
__lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
__lowerCamelCase = []
__lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowerCamelCase = hidden_states[:, None, :]
__lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
__lowerCamelCase = torch.cat(
lowerCamelCase__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
__lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
__lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowerCamelCase = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
__lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
__lowerCamelCase = hidden_states[:, -1]
else:
__lowerCamelCase = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
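
# --- shape sketch (editor addition; a tiny hypothetical configuration) ---
# Assuming the standard diffusers forward signature
# forward(hidden_states, timestep, proj_embedding, encoder_hidden_states=None, ...),
# a pass maps the inputs to a predicted image embedding of width `embedding_dim`.
def _prior_forward_sketch():
    prior = PriorTransformer(
        num_attention_heads=2,
        attention_head_dim=4,
        num_layers=2,
        embedding_dim=8,
        num_embeddings=7,
        additional_embeddings=4,
    )
    hidden_states = torch.randn(1, 8)
    proj_embedding = torch.randn(1, 8)
    encoder_hidden_states = torch.randn(1, 7, 8)
    out = prior(hidden_states, 1, proj_embedding, encoder_hidden_states=encoder_hidden_states)
    assert out.predicted_image_embedding.shape == (1, 8)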
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # NOTE: value reconstructed from the upstream diffusers test
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
_lowerCAmelCase : Optional[Any] = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
_lowerCAmelCase : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=3_2 , )
_lowerCAmelCase : Any = CLIPTextModel(A_ )
_lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=A_ )
_lowerCAmelCase : int = CLIPTextModelWithProjection(A_ )
_lowerCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=A_ )
_lowerCAmelCase : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __UpperCamelCase ( self , snake_case_ , snake_case_="cpu" , snake_case_=torch.floataa , snake_case_=0 ):
_lowerCAmelCase : str = torch.Generator(device=A_ ).manual_seed(A_ )
_lowerCAmelCase : Dict = np.random.RandomState(A_ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCAmelCase : str = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
_lowerCAmelCase : Union[str, Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
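
# --- determinism sketch (editor addition) ---
# The integration inputs above pin randomness by materializing latents from a
# seeded numpy RandomState before handing them to the pipeline; the same trick
# works standalone.
def _fixed_latents(seed=0, shape=(1, 4, 64, 64)):
    latents = np.random.RandomState(seed).standard_normal(shape)
    return torch.from_numpy(latents).to(dtype=torch.float32)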
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
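
# --- quick demo (editor addition) ---
# attribute_map aliases hidden_size -> d_model and num_attention_heads ->
# encoder_attention_heads, which the two config properties above mirror.
def _demo_config_aliases():
    config = TableTransformerConfig(d_model=128, encoder_attention_heads=4)
    assert config.hidden_size == 128
    assert config.num_attention_heads == 4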
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__a = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
def lowerCamelCase ( self : str ):
return 12
@property
def lowerCamelCase ( self : int ):
return 12
@property
def lowerCamelCase ( self : str ):
return 32
@property
def lowerCamelCase ( self : str ):
torch.manual_seed(0 )
snake_case__ : int = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCamelCase ( self : Tuple ):
snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(snake_case_ )
@property
def lowerCamelCase ( self : Optional[Any] ):
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = 12
snake_case__ : Dict = 12
snake_case__ : Optional[Any] = {
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case__ : Union[str, Any] = TransformeraDModel(**snake_case_ )
return model
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : List[Any] = """cpu"""
snake_case__ : Union[str, Any] = self.dummy_vqvae
snake_case__ : Dict = self.dummy_text_encoder
snake_case__ : Union[str, Any] = self.dummy_tokenizer
snake_case__ : Any = self.dummy_transformer
snake_case__ : Optional[Any] = VQDiffusionScheduler(self.num_embed )
snake_case__ : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(learnable=snake_case_ )
snake_case__ : int = VQDiffusionPipeline(
vqvae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , transformer=snake_case_ , scheduler=snake_case_ , learned_classifier_free_sampling_embeddings=snake_case_ , )
snake_case__ : Dict = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Tuple = """teddy bear playing in the pool"""
snake_case__ : Tuple = torch.Generator(device=snake_case_ ).manual_seed(0 )
snake_case__ : List[str] = pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="""np""" )
snake_case__ : Optional[int] = output.images
snake_case__ : Any = torch.Generator(device=snake_case_ ).manual_seed(0 )
snake_case__ : Optional[Any] = pipe(
[prompt] , generator=snake_case_ , output_type="""np""" , return_dict=snake_case_ , num_inference_steps=2 )[0]
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
snake_case__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case__ : Tuple = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self : int ):
snake_case__ : Tuple = """cpu"""
snake_case__ : Optional[Any] = self.dummy_vqvae
snake_case__ : List[str] = self.dummy_text_encoder
snake_case__ : int = self.dummy_tokenizer
snake_case__ : str = self.dummy_transformer
snake_case__ : List[str] = VQDiffusionScheduler(self.num_embed )
snake_case__ : Dict = LearnedClassifierFreeSamplingEmbeddings(
learnable=snake_case_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case__ : int = VQDiffusionPipeline(
vqvae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , transformer=snake_case_ , scheduler=snake_case_ , learned_classifier_free_sampling_embeddings=snake_case_ , )
snake_case__ : Optional[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Dict = """teddy bear playing in the pool"""
snake_case__ : Optional[Any] = torch.Generator(device=snake_case_ ).manual_seed(0 )
snake_case__ : List[Any] = pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="""np""" )
snake_case__ : List[str] = output.images
snake_case__ : Dict = torch.Generator(device=snake_case_ ).manual_seed(0 )
snake_case__ : List[str] = pipe(
[prompt] , generator=snake_case_ , output_type="""np""" , return_dict=snake_case_ , num_inference_steps=2 )[0]
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case__ : Optional[int] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def lowerCamelCase ( self : str ):
snake_case__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case__ : Optional[Any] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case__ : List[Any] = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case__ : Any = torch.Generator(device=snake_case_ ).manual_seed(0 )
snake_case__ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=snake_case_ , output_type="""np""" , )
snake_case__ : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
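

# --- quick demo (editor addition) ---
# The xpath-specific sizes are plain attributes consumed by MarkupLM's xpath
# embedding layers (tag/subscript vocab sizes, padding ids, unit width).
def _demo_markuplm_config():
    config = MarkupLMConfig(max_depth=25, xpath_unit_hidden_size=16)
    assert config.max_depth == 25
    assert config.xpath_unit_hidden_size == 16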
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
def __init__( self : Tuple ,lowercase__ : Any ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : str=0.0 ,lowercase__ : Dict = None ,lowercase__ : str = "geglu" ,lowercase__ : List[Any] = None ,lowercase__ : Optional[int] = False ,lowercase__ : List[str] = False ,lowercase__ : Optional[Any] = False ,lowercase__ : Any = False ,lowercase__ : List[str] = True ,lowercase__ : Optional[int] = "layer_norm" ,lowercase__ : Optional[int] = False ,):
super().__init__()
__lowercase = only_cross_attention
__lowercase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__lowercase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__lowercase = AdaLayerNorm(_A ,_A )
elif self.use_ada_layer_norm_zero:
__lowercase = AdaLayerNormZero(_A ,_A )
else:
__lowercase = nn.LayerNorm(_A ,elementwise_affine=_A )
__lowercase = Attention(
query_dim=_A ,heads=_A ,dim_head=_A ,dropout=_A ,bias=_A ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_A ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__lowercase = (
AdaLayerNorm(_A ,_A )
if self.use_ada_layer_norm
else nn.LayerNorm(_A ,elementwise_affine=_A )
)
__lowercase = Attention(
query_dim=_A ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_A ,dim_head=_A ,dropout=_A ,bias=_A ,upcast_attention=_A ,) # is self-attn if encoder_hidden_states is none
else:
__lowercase = None
__lowercase = None
# 3. Feed-forward
__lowercase = nn.LayerNorm(_A ,elementwise_affine=_A )
__lowercase = FeedForward(_A ,dropout=_A ,activation_fn=_A ,final_dropout=_A )
# let chunk size default to None
__lowercase = None
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Tuple ):
# Sets chunk feed-forward
__lowercase = chunk_size
__lowercase = dim
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] = None ,lowercase__ : Any = None ,lowercase__ : Optional[int] = None ,lowercase__ : Any = None ,lowercase__ : Union[str, Any] = None ,lowercase__ : Tuple = None ,):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
__lowercase = self.norma(_A ,_A )
elif self.use_ada_layer_norm_zero:
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.norma(
_A ,_A ,_A ,hidden_dtype=hidden_states.dtype )
else:
__lowercase = self.norma(_A )
__lowercase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__lowercase = self.attna(
_A ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_A ,**_A ,)
if self.use_ada_layer_norm_zero:
__lowercase = gate_msa.unsqueeze(1 ) * attn_output
__lowercase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__lowercase = (
self.norma(_A ,_A ) if self.use_ada_layer_norm else self.norma(_A )
)
__lowercase = self.attna(
_A ,encoder_hidden_states=_A ,attention_mask=_A ,**_A ,)
__lowercase = attn_output + hidden_states
# 3. Feed-forward
__lowercase = self.norma(_A )
if self.use_ada_layer_norm_zero:
__lowercase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
__lowercase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__lowercase = torch.cat(
[self.ff(_A ) for hid_slice in norm_hidden_states.chunk(_A ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
__lowercase = self.ff(_A )
if self.use_ada_layer_norm_zero:
__lowercase = gate_mlp.unsqueeze(1 ) * ff_output
__lowercase = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer with a configurable gated activation."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation with an optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated linear unit variant of GELU (https://arxiv.org/abs/2002.05202)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The approximate form of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero), conditioned on timestep and class labels."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
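

# Illustrative sketch (not part of the original file): a quick shape check of
# the self-contained feed-forward modules above; the dimensions are arbitrary.
if __name__ == "__main__":
    ff = FeedForward(dim=32, activation_fn="geglu", dropout=0.1)
    sample = torch.randn(2, 8, 32)  # (batch, sequence, dim)
    print(ff(sample).shape)  # torch.Size([2, 8, 32])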
| 104 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
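

# A vectorized sketch of the same operation (added for illustration, not part
# of the original file): numpy broadcasting inverts every channel at once,
# assuming a standard 8-bit image.
def convert_to_negative_vectorized(img):
    return 255 - img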
if __name__ == "__main__":
# read original image
__UpperCAmelCase = imread("image_data/lena.jpg", 1)
# convert to its negative
__UpperCAmelCase = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 299 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
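

# Illustrative usage sketch (added; not in the original file): the tokenizer
# pair is normally loaded from a public RAG checkpoint, e.g.
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
# batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")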
| 155 |
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`
    using bottom-up dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
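    # illustrative check (added): 12 = 4 + 4 + 4, so three squares suffice
    print(minimum_squares_to_represent_a_number(12))  # 3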
| 155 | 1 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
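

# Illustrative note (added; not part of the original module): comment-only
# lines do not change the hash, which is what makes it usable as a cache key:
# _hash_python_lines(["# a comment", "x = 1"]) == _hash_python_lines(["x = 1"])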
# get importable module names and hash for caching
A ={
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
A ={
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
A ={'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
A ={}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 34 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 322 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    # encode returns (quantized latents, embedding loss, info)
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
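

# Illustrative usage sketch (added; not part of the original file): end-to-end
# wiring of the helpers above. The paths are the module's own defaults and are
# assumed to exist locally.
# model = load_vqgan("cpu")  # loads ./model_checkpoints/vqgan_only.{yaml,pt}
# x_rec = reconstruct_with_vqgan(images, model)  # `images` is a (B, C, H, W) tensor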
| 363 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
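
# Illustrative usage sketch (added; not part of the original __init__): the
# text-to-3D entry point once the optional dependencies are installed.
# "openai/shap-e" is the public checkpoint for this pipeline.
# pipe = ShapEPipeline.from_pretrained("openai/shap-e")
# images = pipe("a shark", guidance_scale=15.0).images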
| 143 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def snake_case_ ( )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = 10
_UpperCAmelCase : List[Any] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
_UpperCAmelCase : str = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowerCAmelCase_ ) ),
} , features=lowerCAmelCase_ , )
return dataset
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
'''simple docstring'''
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowerCAmelCase_ )
return filename
# FILE_CONTENT + files
A_ : Optional[int] = """\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
_UpperCAmelCase : Any = FILE_CONTENT
with open(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ )
return filename
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
    import bz2
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
_UpperCAmelCase : int = bytes(lowerCAmelCase_ , """utf-8""" )
    with bz2.open(lowerCAmelCase_ , """wb""" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
import gzip
_UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
_UpperCAmelCase : str = bytes(lowerCAmelCase_ , """utf-8""" )
with gzip.open(lowerCAmelCase_ , """wb""" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
_UpperCAmelCase : Optional[int] = bytes(lowerCAmelCase_ , """utf-8""" )
        with lz4.frame.open(lowerCAmelCase_ , """wb""" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(lowerCAmelCase_ , """w""" ) as archive:
archive.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
import tarfile
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCAmelCase_ , """w""" ) as f:
f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
import lzma
_UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
_UpperCAmelCase : Any = bytes(lowerCAmelCase_ , """utf-8""" )
with lzma.open(lowerCAmelCase_ , """wb""" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
'''simple docstring'''
import zipfile
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Any:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
_UpperCAmelCase : Union[str, Any] = bytes(lowerCAmelCase_ , """utf-8""" )
with zstd.open(lowerCAmelCase_ , """wb""" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
_UpperCAmelCase : Optional[int] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ )
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [  # name inferred; this constant is not referenced elsewhere in this file
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="""session""" )
def snake_case_ ( )-> Optional[int]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = datasets.Dataset.from_dict(lowerCAmelCase_ )
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(lowerCAmelCase_ ) ) as con:
_UpperCAmelCase : Optional[int] = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowerCAmelCase_ , """w""" , newline="""""" ) as f:
_UpperCAmelCase : List[Any] = csv.DictWriter(lowerCAmelCase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowerCAmelCase_ , """w""" , newline="""""" ) as f:
_UpperCAmelCase : Tuple = csv.DictWriter(lowerCAmelCase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
    import bz2
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowerCAmelCase_ , """rb""" ) as f:
_UpperCAmelCase : List[str] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCAmelCase_ , """wb""" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase_ ) ) )
f.write(lowerCAmelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
_UpperCAmelCase : Dict = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowerCAmelCase_ , """wb""" ) as f:
_UpperCAmelCase : Tuple = pq.ParquetWriter(lowerCAmelCase_ , schema=lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCAmelCase_ ) )] for k in DATA[0]} , schema=lowerCAmelCase_ )
writer.write_table(lowerCAmelCase_ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_UpperCAmelCase : Tuple = {"""data""": DATA}
with open(lowerCAmelCase_ , """w""" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Any:
'''simple docstring'''
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_UpperCAmelCase : Tuple = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCAmelCase_ , """w""" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowerCAmelCase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowerCAmelCase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowerCAmelCase_ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowerCAmelCase_ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
import gzip
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowerCAmelCase_ , """rb""" ) as orig_file:
with gzip.open(lowerCAmelCase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
import gzip
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowerCAmelCase_ , """rb""" ) as orig_file:
with gzip.open(lowerCAmelCase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.join("""nested""" , os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase_ ) ) )
f.write(lowerCAmelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCAmelCase_ , """w""" ) as f:
f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
'''simple docstring'''
_UpperCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCAmelCase_ , """w""" ) as f:
f.add(lowerCAmelCase_ , arcname=os.path.join("""nested""" , os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ["""0""", """1""", """2""", """3"""]
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowerCAmelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = ["""0""", """1""", """2""", """3"""]
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowerCAmelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = ["""0""", """1""", """2""", """3"""]
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowerCAmelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase_ ) ) )
f.write(lowerCAmelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( )-> List[Any]:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def snake_case_ ( )-> Dict:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCAmelCase_ , """w""" ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
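

# Illustrative sketch (added; not part of the original conftest): pytest
# injects these session-scoped fixtures by function name, so a test module
# consumes them as arguments. The fixture name `csv_path` below is assumed
# from the upstream datasets conftest; in this dump the fixture functions
# carry scrambled names.
#
# def test_csv_has_expected_header(csv_path):
#     with open(csv_path, newline="") as f:
#         assert f.readline().strip() == "col_1,col_2,col_3"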
| 215 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 215 | 1 |
from __future__ import annotations
_lowerCamelCase = 1.60_21e-19 # units = C
def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
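    # illustrative check (added): with mobility given as 0, the function solves
    # for it from the other two quantities
    print(electric_conductivity(conductivity=25, electron_conc=100, mobility=0))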
| 360 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Data2Vec text model."""

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
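

# Illustrative sketch (added; not part of the original file): the ONNX config
# exposes the dynamic axes used at export time. Construction via
# `from_model_config` is assumed from the shared OnnxConfig API.
# onnx_config = Data2VecTextOnnxConfig.from_model_config(Data2VecTextConfig())
# print(onnx_config.inputs)
# # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
# #              ('attention_mask', {0: 'batch', 1: 'sequence'})])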
| 177 | 0 |
def solution(n: int = 1000000) -> int:
    """Return the starting number below `n` that produces the longest Collatz
    chain (Project Euler problem 14). Chain lengths are memoized in `counters`."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 345 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, using any model with a masked LM head."""
    def get_masked_index(self, input_ids):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=False)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
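
# A minimal usage sketch for the pipeline implemented above (an illustration,
# not part of the original module; assumes the `transformers` `pipeline`
# factory and the public `bert-base-uncased` checkpoint -- scores and
# predictions vary by model):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2)
#   # -> list of dicts with "score", "token", "token_str" and "sequence" keys
#   unmasker("Paris is the [MASK] of France.", targets=["capital", "heart"])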
| 100 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
_lowerCamelCase :Union[str, Any] = ViTImageProcessor if is_vision_available() else None
@property
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """This function prepares a single PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = self.get_image_processor()
lowerCAmelCase__ : List[str] = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : List[str] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : int = self.get_tokenizer()
lowerCAmelCase__ : Any = self.get_image_processor()
lowerCAmelCase__ : int = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase__ : List[str] = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
lowerCAmelCase__ : str = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : int = self.get_image_processor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : str = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Dict = self.prepare_image_inputs()
lowerCAmelCase__ : List[Any] = image_processor(UpperCamelCase , return_tensors="""np""" )
lowerCAmelCase__ : Optional[Any] = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.get_image_processor()
lowerCAmelCase__ : List[Any] = self.get_tokenizer()
lowerCAmelCase__ : Any = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Tuple = """test"""
lowerCAmelCase__ : int = processor(text=UpperCamelCase )
lowerCAmelCase__ : List[str] = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.get_image_processor()
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : str = """test"""
lowerCAmelCase__ : List[str] = self.prepare_image_inputs()
lowerCAmelCase__ : Union[str, Any] = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = self.get_image_processor()
lowerCAmelCase__ : Tuple = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ : List[str] = processor.char_decode(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(UpperCamelCase )
lowerCAmelCase__ : Tuple = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.get_image_processor()
lowerCAmelCase__ : Tuple = self.get_tokenizer()
lowerCAmelCase__ : Optional[Any] = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase__ : int = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : int = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = torch.randn(1 , 27 , 38 )
lowerCAmelCase__ : Any = torch.randn(1 , 27 , 5_02_57 )
lowerCAmelCase__ : Any = torch.randn(1 , 27 , 3_05_22 )
lowerCAmelCase__ : Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
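
# A minimal end-to-end sketch of the processor exercised by these tests (an
# illustration; assumes the public "alibaba-damo/mgp-str-base" checkpoint and
# a PIL image `image` holding a cropped text region):
#
#   from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   text = processor.batch_decode(outputs.logits)["generated_text"]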
| 212 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
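
# Cross-check sketch (an addition, not part of the original module): the
# first-order value printed above is the plain Shannon entropy
# H = -sum(p * log2(p)); computed directly over raw character counts:
#
#   >>> from collections import Counter
#   >>> import math
#   >>> counts = Counter("aab")
#   >>> total = sum(counts.values())
#   >>> h = -sum((c / total) * math.log2(c / total) for c in counts.values())
#   >>> round(h, 4)
#   0.9183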
| 212 | 1 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
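
# Deterministic sanity check (a sketch; the driver above reads from stdin):
#
#   >>> jump_search([0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 55)
#   9
#   >>> jump_search([0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 4)
#   -1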
| 144 |
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
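

# A space-optimised sketch of the same 0/1 knapsack recurrence (an addition,
# not part of the original module): the (n + 1) x (w + 1) table collapses to a
# single row of size w + 1 when capacities are scanned in reverse, at the cost
# of losing the table that _construct_solution needs.
def knapsack_1d(w: int, wt: list, val: list) -> int:
    dp = [0] * (w + 1)
    for weight, value in zip(wt, val):
        # reverse order, so each item is used at most once
        for capacity in range(w, weight - 1, -1):
            dp[capacity] = max(dp[capacity], dp[capacity - weight] + value)
    # e.g. knapsack_1d(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8, matching the demo below
    return dp[w]
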
if __name__ == "__main__":
A__ : Optional[Any] = [3, 2, 4, 4]
A__ : str = [4, 3, 2, 3]
A__ : List[Any] = 4
A__ : Union[str, Any] = 6
A__ : List[str] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
A__ , A__ : Any = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
A__ , A__ : str = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 144 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__(metaclass=DummyObject):  # original class name not recoverable from this dump
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"]) | 368 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )["input_ids"]
        model_inputs["labels"] = labels
        return model_inputs | 12 | 0
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
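
# Worked example (a sketch): with passes costing 2 (day), 7 (week) and
# 15 (month), travel days [1, 4, 6, 7, 8, 20] are covered cheapest by a day
# pass on day 1, a 7-day pass for days 4-10 and a day pass on day 20:
#
#   >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
#   11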
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
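
# Quick illustration of the class under test (a sketch; the hands come from
# TEST_COMPARE above):
#
#   hand, other = PokerHand("2H 3H 4H 5H 6H"), PokerHand("KS AS TS QS JS")
#   assert hand.compare_with(other) == "Loss"  # straight flush < royal flush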
def generate_random_hand():
    """Generate a random hand out of SORTED_HANDS, with its expected result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight() -> None:
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight() -> None:
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376 | 331 | 1
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
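
# Round-trip sanity check (an addition, not part of the original module):
# XOR with the same key is an involution, so decrypting an encryption
# recovers the plaintext.
#
#   crypt = XORCipher(key=67)
#   assert crypt.decrypt_string(crypt.encrypt_string("hallo welt", 67), 67) == "hallo welt"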
| 353 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=9_9 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Optional[int]="last" , __lowerCAmelCase : str=None , __lowerCAmelCase : int=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Dict = seq_length
_lowerCamelCase : List[Any] = is_training
_lowerCamelCase : Dict = use_input_lengths
_lowerCamelCase : Tuple = use_token_type_ids
_lowerCamelCase : Any = use_labels
_lowerCamelCase : Optional[Any] = gelu_activation
_lowerCamelCase : Optional[Any] = sinusoidal_embeddings
_lowerCamelCase : Dict = causal
_lowerCamelCase : Dict = asm
_lowerCamelCase : str = n_langs
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[int] = n_special
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : Any = type_vocab_size
_lowerCamelCase : Optional[int] = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : List[Any] = num_labels
_lowerCamelCase : Dict = num_choices
_lowerCamelCase : str = summary_type
_lowerCamelCase : List[str] = use_proj
_lowerCamelCase : int = scope
    def prepare_config_and_inputs(self):
"""simple docstring"""
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_input_lengths:
_lowerCamelCase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowerCamelCase : Union[str, Any] = None
if self.use_token_type_ids:
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Optional[Any] = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : str = ids_tensor([self.batch_size] , 2 ).float()
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = FlaubertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase , langs=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : Tuple = FlaubertWithLMHeadModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , ):
"""simple docstring"""
_lowerCamelCase : str = FlaubertForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , )
_lowerCamelCase : List[str] = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , )
((_lowerCamelCase) , ) : str = result_with_labels.to_tuple()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
((_lowerCamelCase) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : Dict = FlaubertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase )
_lowerCamelCase : Tuple = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : List[str] = FlaubertForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.num_choices
_lowerCamelCase : Any = FlaubertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : int = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : Dict = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_lowerCamelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
_lowerCamelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FlaubertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_lowerCamelCase : Any = True
_lowerCamelCase : int = model_class(config=__lowerCAmelCase )
_lowerCamelCase : List[str] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : int = torch.jit.trace(
__lowerCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) )
_lowerCamelCase : Union[str, Any] = torch.jit.load(os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) , map_location=__lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(__lowerCAmelCase ) , inputs_dict['''attention_mask'''].to(__lowerCAmelCase ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_lowerCamelCase : Any = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
_lowerCamelCase : Any = model(__lowerCAmelCase )[0]
_lowerCamelCase : Optional[Any] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
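
# A minimal inference sketch for the checkpoint used above (an illustration;
# hidden-state values are model-dependent):
#
#   from transformers import FlaubertModel, FlaubertTokenizer
#
#   tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   inputs = tokenizer("Le chat dort.", return_tensors="pt")
#   last_hidden_state = model(**inputs)[0]  # shape (1, seq_len, 768)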
| 175 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
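
# With the lazy module in place, the tokenizer is only imported on first use
# (a sketch; "vinai/bertweet-base" is the reference checkpoint):
#
#   from transformers import BertweetTokenizer
#
#   tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base")
#   tokenizer.tokenize("Hello :) #NLP")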
| 276 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
def a_ ( self) -> str:
return self._src_lang
@src_lang.setter
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
snake_case_ = src_lang
snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__)
snake_case_ = tgt_lang_id
return inputs
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding:
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self) -> List[Any]:
return self.set_src_lang_special_tokens(self.src_lang)
def a_ ( self) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__)
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens)
snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens)
snake_case_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__)
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens)
snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens)
snake_case_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
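

# --- Usage sketch (not part of the original file) ---------------------------
# Assumes the public facebook/nllb-200-distilled-600M checkpoint is reachable;
# the token layout described below is the non-legacy behaviour.
# tokenizer = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# batch = tokenizer("Hello world", return_tensors="pt")
# # input_ids = [eng_Latn lang-code id] + text token ids + [</s>]
# tokenizer.src_lang = "deu_Latn"  # the setter re-applies the special tokens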
| 69 | 0 |
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A generic heap whose ordering is controlled by a user-supplied key."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns the index of a valid parent, as per the desired ordering, among
        the given index and both of its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value if the item is present in the heap."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last element into the freed slot and re-index it.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item of the heap without removing it."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns the top item of the heap and removes it."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


if __name__ == "__main__":
    import doctest

    doctest.testmod()
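

# --- Usage sketch (not part of the original file) ---------------------------
# With the default identity key the structure behaves as a max-heap on the
# stored value; pass key=lambda x: -x for min-heap ordering.
if __name__ == "__main__":
    demo = Heap()
    demo.insert_item(5, 34)
    demo.insert_item(6, 31)
    demo.insert_item(7, 37)
    print(demo.get_top())      # [7, 37] -- the highest-valued entry
    demo.update_item(6, 100)   # re-score item 6 and restore the heap property
    print(demo.extract_top())  # [6, 100] -- removed and returned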
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
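
# How the lazy pattern above behaves (illustrative note, assuming sentencepiece
# is installed): `from transformers.models.gpt_sw3 import GPTSw3Tokenizer` does
# not import the tokenizer module eagerly; the _LazyModule placed in sys.modules
# resolves "tokenization_gpt_sw3" only on first attribute access.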
| 202 | 0 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    # str.replace returns a new string, so keep the result (the original
    # discarded it, leaving spaces in place).
    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # The exact rotor choice was garbled in the source; any three unique rotors
    # work, and (rotor2, rotor4, rotor8) matches the upstream demo.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 277 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """Chained hash table: each slot holds a deque of colliding values."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
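

# --- Usage sketch (not part of the original file) ---------------------------
# Assumes the HashTable base class exposes insert_data(); values hashing to the
# same slot are chained in a deque instead of probing for a new slot.
# table = HashTableWithLinkedList(3)
# for value in (10, 13, 16):  # all collide modulo 3
#     table.insert_data(value)
# print(table.values)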
| 277 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
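

# --- Invocation sketch (not part of the original file; paths and
# hyper-parameters are placeholders):
# python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file train.txt \
#     --train_ref_file train_ref.json \
#     --mlm_probability 0.15 \
#     --do_train --output_dir ./mlm-wwm-out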
| 362 |
def count_inversions_bf(arr):
    """Counts inversions by brute force, comparing every pair: O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions by divide-and-conquer (merge sort): O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merges two sorted lists, counting inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)

    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
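

# --- Sanity sketch (not part of the original file) ---------------------------
# A reversed range of length k has k*(k-1)/2 inversions; both counters agree:
# assert count_inversions_bf(list(range(5))[::-1]) == 10
# assert count_inversions_recursive(list(range(5))[::-1])[1] == 10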
| 165 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , "tf_padding" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , "depth_multiplier" ) )
class MobileNetVaModelTester:
def __init__( self , A_ , A_=13 , A_=3 , A_=32 , A_=0.25 , A_=8 , A_=True , A_=1024 , A_=32 , A_="relu6" , A_=0.1 , A_=0.02 , A_=True , A_=True , A_=10 , A_=None , ):
'''simple docstring'''
UpperCamelCase : Dict = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Dict = num_channels
UpperCamelCase : Any = image_size
UpperCamelCase : Any = depth_multiplier
UpperCamelCase : Dict = min_depth
UpperCamelCase : List[str] = tf_padding
UpperCamelCase : List[str] = int(last_hidden_size * depth_multiplier )
UpperCamelCase : str = output_stride
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Tuple = classifier_dropout_prob
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : Union[str, Any] = num_labels
UpperCamelCase : str = initializer_range
UpperCamelCase : List[str] = scope
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[Any] = None
UpperCamelCase : Any = None
if self.use_labels:
UpperCamelCase : str = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase( self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = MobileNetVaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase : str = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Union[str, Any] = MobileNetVaForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase : Optional[int] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = config_and_inputs
UpperCamelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_UpperCAmelCase :Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
_UpperCAmelCase :List[str] = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :int = False
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :str = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = MobileNetVaModelTester(self )
UpperCamelCase : List[Any] = MobileNetVaConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Any = model_class(__UpperCamelCase )
UpperCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Any = [*signature.parameters.keys()]
UpperCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __UpperCamelCase( self ):
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase : int = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase : Any = outputs.hidden_states
UpperCamelCase : List[Any] = 26
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[int] = MobileNetVaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A_ ( ) -> str:
UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__UpperCamelCase )
UpperCamelCase : Dict = self.default_image_processor
UpperCamelCase : Dict = prepare_img()
UpperCamelCase : Any = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**__UpperCamelCase )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase : List[Any] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
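

# --- Inference sketch (not part of the original file) ------------------------
# Mirrors the integration test above; assumes network access to the
# google/mobilenet_v1_1.0_224 checkpoint.
# processor = MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
# model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# logits = model(**inputs).logits                       # shape (1, 1001)
# print(model.config.id2label[logits.argmax(-1).item()])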
| 52 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
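

# --- Invocation sketch (not part of the original file; the path is a
# placeholder for a checkpoint fine-pruned with movement-pruning masks):
# python bertarize.py \
#     --pruning_method sigmoied_threshold \
#     --threshold 0.1 \
#     --model_name_or_path ./serialization_dir/fine_pruned_model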
| 260 | 0 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
        temp = []
        (pro, x) = heapq.heappop(self.elements)
        while x != item:
            temp.append((pro, x))
            (pro, x) = heapq.heappop(self.elements)
        for prito, yyy in temp:
            heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by the time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground() -> list[TPos]:
    some_list = []
for x in range(1 ,5 ):
for y in range(1 ,6 ):
some_list.append((x, y) )
for x in range(15 ,20 ):
some_list.append((x, 17) )
for x in range(10 ,19 ):
for y in range(1 ,15 ):
some_list.append((x, y) )
# L block
for x in range(1 ,4 ):
for y in range(12 ,19 ):
some_list.append((x, y) )
for x in range(3 ,13 ):
for y in range(16 ,19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
blocks_all = make_common_ground()


blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
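
# --- Usage note (not part of the original file) ------------------------------
# multi_a_star expands the "anchor" consistent heuristic (index 0 in the
# heuristics dict) alongside the inadmissible ones. To experiment with a
# different inadmissible guide, swap an entry before calling, e.g.:
# heuristics[2] = lambda p, goal: 2 * heuristic_1(p, goal)
# multi_a_star(start, goal, n_heuristic)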
| 370 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
UpperCamelCase_ = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
UpperCamelCase_ = {
"vinai/phobert-base": 2_5_6,
"vinai/phobert-large": 2_5_6,
}
def lowercase__( __UpperCamelCase: str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = set()
SCREAMING_SNAKE_CASE : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE : int = char
SCREAMING_SNAKE_CASE : str = set(__UpperCamelCase )
return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer, based on fastBPE. This tokenizer derives from
    [`PreTrainedTokenizer`], which contains most of the main methods.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """PhoBERT does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for line_tmp in lines:
            line = line_tmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
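# Usage sketch (illustrative; the file paths are hypothetical — a released
# checkpoint would normally be loaded with
# PhobertTokenizer.from_pretrained("vinai/phobert-base")):
#
#     tokenizer = PhobertTokenizer(vocab_file="vocab.txt", merges_file="bpe.codes")
#     ids = tokenizer("Tôi là sinh_viên")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))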
"""Testing suite for the PyTorch Audio Spectrogram Transformer (AST) model."""

import inspect
import unittest

from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on some audio from AudioSet
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
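# Usage sketch (illustrative): the audio-classification pipeline wraps the same
# feature extractor + model pair exercised by the integration test above; the
# audio file path is hypothetical.
#
#     from transformers import pipeline
#
#     classifier = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")
#     print(classifier("sample_audio.flac")[:3])  # top predicted AudioSet labels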
"""XLM-RoBERTa configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM-RoBERTa model."""

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
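# Usage sketch (illustrative; the sizes below are arbitrary, not a released
# checkpoint configuration, and assume the standard
# OnnxConfig.from_model_config constructor):
#
#     config = XLMRobertaConfig(hidden_size=256, num_hidden_layers=4)
#     onnx_config = XLMRobertaOnnxConfig.from_model_config(config)
#     print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes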
"""CUAD metric."""

import datasets

from .evaluate import evaluate


_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
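# Shape illustration (added for clarity, not part of the original metric):
# _compute flattens each reference into the nested SQuAD-style structure that
# the bundled evaluate() script expects, e.g.
#
#     reference = {"id": "q1", "answers": {"text": ["The seller:"], "answer_start": [143]}}
#     # becomes one "qas" entry of
#     dataset = [{"paragraphs": [{"qas": [{"id": "q1", "answers": [{"text": "The seller:"}]}]}]}]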
"""Recall metric."""

from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`\'warn\'`, `0`, or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
# Lint as: python3
"""Jax formatter for the `datasets` library."""

import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
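# Usage sketch (illustrative): JaxFormatter is normally reached through
# Dataset.with_format("jax") rather than instantiated directly.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     print(type(ds[0]["x"]))  # a jax.Array placed on the default device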
"""
Processor class for SAM.
"""

from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor and handles 2D points and bounding boxes
    on top of it.

    Args:
        image_processor (`SamImageProcessor`):
            An instance of [`SamImageProcessor`]. The image processor is a required input.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Uses [`SamImageProcessor.__call__`] to prepare image(s) for the model, and also prepares 2D points and
        bounding boxes for the model if they are provided.
        """
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad the 2D points and labels to the maximum number of points in the batch."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(
        self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False
    ) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        """Check and preprocess the 2D points, labels and bounding boxes."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
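# Usage sketch (illustrative; "facebook/sam-vit-base" is a released SAM
# checkpoint, the image path is hypothetical):
#
#     from PIL import Image
#     from transformers import SamProcessor
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     image = Image.open("dog.jpg")
#     inputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
#     # inputs now contains pixel_values plus "input_points" rescaled to the
#     # preprocessed resolution via _normalize_coordinates above.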
"""PyTorch UperNet framework."""

from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers: applies a 2D convolution followed by batch
    normalization and a ReLU activation.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) used in PSPNet."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing decode head, the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Fully convolutional auxiliary head, based on [FCN](https://arxiv.org/abs/1411.4038).
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a (per-pixel) classification loss is computed
            (Cross-Entropy).

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
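# Usage sketch (illustrative; "openmmlab/upernet-convnext-tiny" is the released
# checkpoint listed above, the image path is hypothetical):
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
#     logits = model(**inputs).logits  # (batch_size, num_labels, height, width)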
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: print the indices of a maximum-size set of
    mutually compatible activities, assuming `finish` is sorted ascending.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]

    print_max_activities(start, finish)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__a )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase__ = Features({"text": Value("string" )} )
lowercase__ = Features({} )
lowercase__ = "text"
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return {self.text_column: "text"}
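# Usage sketch (illustrative): a task template maps a dataset's own column
# names onto the canonical schema via column_mapping.
#
#     template = LanguageModeling(text_column="content")
#     print(template.column_mapping)  # {"content": "text"}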
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
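# Usage sketch (illustrative): TFBertTokenizer runs inside the TensorFlow
# graph, so tokenization can be exported together with the model.
#
#     tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#     batch = tf.constant(["hello world"])
#     print(tf_tokenizer(batch)["input_ids"])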
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
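# Usage note (illustrative, not part of this file): with the lazy module
# installed in sys.modules, `from ...layoutxlm import LayoutXLMProcessor`
# only triggers the heavy sentencepiece/tokenizers imports when the attribute
# is actually accessed, keeping package import cheap.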
| 353 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.

    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
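# Quick sanity check (illustrative, not part of the original tests): for a
# diagonal matrix the dominant eigenvalue is simply the largest entry, so
# power_iteration(np.diag([1.0, 2.0, 3.0]), np.ones(3))[0] converges to ~3.0.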
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 43 | 0 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]. Returns -1 if the target is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; falls back to linear search on small ranges."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; falls back to linear search on small ranges."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
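# Note: like binary search, ternary search requires sorted input; it makes
# roughly 2 * log3(n) comparisons, so it is not asymptotically faster than
# binary search. Illustrative call: ite_ternary_search([1, 3, 5, 7, 9], 7) == 3.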
| 63 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# This mirrors transformers' Swin2SR image processor: rescale, then pad
# height/width up to a multiple of `pad_size`.
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        # Pad the bottom/right edges so both dimensions become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
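# Minimal usage sketch (illustrative; `pil_image` is assumed to exist):
#   processor = Swin2SRImageProcessor(pad_size=8)
#   batch = processor(images=pil_image, return_tensors="np")
#   batch["pixel_values"][0].shape  # height/width padded up to multiples of 8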
| 12 | 0 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube, via a floating-point cube root."""
    val = n ** (1 / 3)
    return (val * val * val) == n
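# Note: `n ** (1 / 3)` goes through floating point, so very large cubes can be
# misclassified by rounding error. A rounding-based check (illustrative
# alternative, not part of the original):
def perfect_cube_rounded(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))  # nearest integer candidate for the cube root
    return root**3 == abs(n)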
if __name__ == "__main__":
print(perfect_cube(27))
    print(perfect_cube(4))
| 350 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 17 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, examples):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
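    # Note (hedged): in the released pipeline, `entailment_id` scans
    # `config.label2id` for a label whose lowercased name starts with "entail"
    # and returns -1 when none matches, which is what the four reassignments
    # above exercise.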
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=A , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=A , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
| 31 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
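# Shape contract for the quantizer above (illustrative): `forward` expects
# (batch, vq_embed_dim, height, width) latents and returns same-shaped
# quantized latents, the commitment loss, and the codebook indices; the
# `z + (z_q - z).detach()` line is the straight-through gradient estimator.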
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device and has the same dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
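# Illustrative wiring of the pieces above (a sketch, not part of this file's API):
#   enc = Encoder(in_channels=3, out_channels=4, double_z=True)
#   moments = enc(torch.randn(1, 3, 64, 64))          # 2 * 4 channels (mean, logvar)
#   posterior = DiagonalGaussianDistribution(moments)
#   latents = posterior.sample()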
| 65 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 360 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer connects the input nodes
        # with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the loss w.r.t. each weight matrix (chain rule, applied
        # from the output layer back to the input layer).
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid activation function element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of sigmoid, assuming `value` has already been sigmoid-activated."""
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
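# Note: the target column above is the 3-bit parity (XOR) of the inputs, so a
# well-trained network should return 1 for [1, 1, 1]; with only 10 iterations
# and random initial weights the prediction is not guaranteed to be correct.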
if __name__ == "__main__":
example()
| 100 | 0 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Sum the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(n))


def solution() -> int:
    """Sum of all numbers that equal the sum of the factorials of their digits."""
    # digit-factorial sums of d-digit numbers are at most d * 9!, so 7 * 9! bounds all candidates
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
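# Worked example: 145 = 1! + 4! + 5! = 1 + 24 + 120; the only other such number
# is 40585, so solution() returns 145 + 40585 = 40730 (Project Euler problem 34).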
if __name__ == "__main__":
print(f'''{solution() = }''')
| 349 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
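# Usage sketch (illustrative):
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#       load_dataset(...)  # any HTTP request now fails almost immediately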
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
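# Example: with `pytest -n 4`, workers gw0..gw3 get ports 29500..29503, so
# concurrent torch.distributed tests don't collide on the master port.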
| 349 | 1 |
"""simple docstring"""
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted collection that is larger or equal to item."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted collection that is strictly larger than item."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Inserts item into a sorted collection, keeping it sorted (before any equal items)."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Inserts item into a sorted collection, keeping it sorted (after any equal items)."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of item or None if it is not found."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search built on the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left : right + 1]."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
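# A quick illustrative check of the bisection helpers above (not part of the
# original module): bisect_left returns the leftmost insertion point,
# bisect_right the rightmost, and insort_left inserts while keeping the list
# sorted. The demo values are arbitrary.
def _bisect_demo() -> None:
    demo = [0, 5, 7, 10, 15]
    assert bisect_left(demo, 6) == 2  # 6 would slot in just before 7
    assert bisect_right(demo, 5) == 2  # just after the existing 5
    insort_left(demo, 6)
    assert demo == [0, 5, 6, 7, 10, 15]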
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 340 |
"""simple docstring"""
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating `BertConfig` with additional parameters for pruning/masking configuration."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 340 | 1 |
'''simple docstring'''
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Dict = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
A_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
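# Illustrative only (not part of the original file): make_batched accepts a
# single frame, one video (a list of frames), or a batch of videos, and always
# returns a list of videos. The dummy frame is an arbitrary uint8 array.
def _make_batched_demo():
    frame = np.zeros((224, 224, 3), dtype=np.uint8)
    assert make_batched(frame)[0][0] is frame           # single image -> [[frame]]
    assert make_batched([frame, frame])[0][0] is frame  # one video -> [video]
    assert len(make_batched([[frame], [frame]])) == 2   # batch of videos -> unchanged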
class VideoMAEImageProcessor(BaseImageProcessor):
    r"""Constructs a video image processor that resizes, center-crops, rescales and normalizes frames."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
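# A short usage sketch (illustrative, not from the original file; the expected
# output shape assumes the default 224x224 crop): eight dummy frames go in, a
# (batch, frames, channels, height, width) array comes out.
def _video_processor_demo():
    processor = VideoMAEImageProcessor()
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    return batch["pixel_values"].shape  # expected: (1, 8, 3, 224, 224)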
| 362 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the DINO model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 247 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ : str = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 |
"""simple docstring"""
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
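# A tiny worked example of the one-hot conversion above (illustrative, not part
# of the original module): label k sets column k of the corresponding row.
def _one_hot_demo():
    demo = _dense_to_one_hot(numpy.array([1, 0, 2]), 3)
    assert demo.tolist() == [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
    return demo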
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
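# A short usage sketch (illustrative, not part of the original module): with
# fake_data=True the class needs no real arrays, so next_batch can be exercised
# without downloading anything.
def _dataset_demo():
    ds = _DataSet([], [], fake_data=True, one_hot=True)
    images, labels = ds.next_batch(2, fake_data=True)
    assert len(images) == 2 and len(images[0]) == 784
    return images, labels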
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 246 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : List[Any] = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 363 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
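# Illustrative shape sketch (not from the original script; the T5X kernel
# layouts below are assumptions for demonstration): per-layer attention kernels
# are stacked along axis 1, so slicing layer i and flattening the head dims
# yields the 2D matrices that t5x_attention_lookup returns.
def _attention_lookup_demo():
    d_model, num_layers, num_heads, d_kv = 8, 2, 2, 4
    params = {
        f"encoder/encoder/attention/{name}/kernel": np.zeros((d_model, num_layers, num_heads, d_kv))
        for name in ("key", "query", "value")
    }
    params["encoder/encoder/attention/out/kernel"] = np.zeros((num_heads, num_layers, d_kv, d_model))
    k, o, q, v = t5x_attention_lookup(params, 0, "encoder", "attention")
    assert k.shape == (d_model, num_heads * d_kv)  # heads flattened into columns
    assert o.shape == (num_heads * d_kv, d_model)  # heads flattened into rows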
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 62 | 0 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__A = {
"E": 1_2.7_0,
"T": 9.0_6,
"A": 8.1_7,
"O": 7.5_1,
"I": 6.9_7,
"N": 6.7_5,
"S": 6.3_3,
"H": 6.0_9,
"R": 5.9_9,
"D": 4.2_5,
"L": 4.0_3,
"C": 2.7_8,
"U": 2.7_6,
"M": 2.4_1,
"W": 2.3_6,
"F": 2.2_3,
"G": 2.0_2,
"Y": 1.9_7,
"P": 1.9_3,
"B": 1.2_9,
"V": 0.9_8,
"K": 0.7_7,
"J": 0.1_5,
"X": 0.1_5,
"Q": 0.1_0,
"Z": 0.0_7,
}
__A = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
__A = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """Counts how often each uppercase letter occurs in the message."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    """Returns the 26 letters ordered from most to least frequent in the message."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Scores 0..12 by how many of the six most/least common letters match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
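# Illustrative usage (not part of the original module): ordinary English text
# should rank common letters near the front and score high on the 0..12 scale.
def _freq_demo():
    english = "The quick brown fox jumps over the lazy dog and then sleeps"
    order = get_frequency_order(english)
    assert len(order) == 26  # every letter appears exactly once in the ranking
    assert 0 <= english_freq_match_score(english) <= 12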
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 |
class CircularQueue:
    """Circular FIFO queue with a fixed-size backing array."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
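# A short usage sketch (illustrative, not part of the original file): the queue
# wraps around its fixed-size backing array once front/rear pass the end.
def _circular_queue_demo():
    q = CircularQueue(3)
    q.enqueue("a").enqueue("b").enqueue("c")
    assert q.dequeue() == "a"
    q.enqueue("d")  # reuses the slot freed by the dequeue
    assert q.first() == "b" and len(q) == 3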
| 90 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
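# Illustrative usage sketch (not part of the original tests): running the same
# checkpoint outside the test harness. The generated summary is whatever the
# checkpoint produces; no particular output string is guaranteed here.
#
#     from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
#
#     tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
#     batch = tokenizer(["PG&E stated it scheduled the blackouts ..."], padding=True, return_tensors="tf")
#     ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#     print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True))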
| 316 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize, converting any nested GenerationConfig into a plain dict.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
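# Minimal usage sketch (not from the original module; the output directory is a
# hypothetical placeholder, the generation_* fields are the ones defined above):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="./out",
#         predict_with_generate=True,
#         generation_max_length=64,
#         generation_num_beams=4,
#     )
#     assert args.to_dict()["generation_max_length"] == 64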
| 316 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
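# Usage sketch (hypothetical WTQ-style setup; the label names are illustrative):
#
#     config = TapasConfig(
#         num_aggregation_labels=4,
#         aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
#     )
#     # String keys are normalized to ints by the constructor above:
#     assert config.aggregation_labels[0] == "NONE"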
| 138 |
"""Project Euler Problem 6 (sum square difference): https://projecteuler.net/problem=6"""


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
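    # Quick sanity check: for n = 10 the sum is 55, its square is 3025, the sum
    # of the squares is 385, and the difference is 3025 - 385 = 2640.
    assert solution(10) == 2640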
| 125 | 0 |
"""simple docstring"""
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any, UpperCAmelCase__ : Any ):
__lowercase = data
__lowercase = None
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] ):
__lowercase = None
def _lowercase ( self : Any ):
__lowercase = self.head
while temp is not None:
print(temp.data, end=" " )
__lowercase = temp.next
print()
def _lowercase ( self : List[Any], UpperCAmelCase__ : Any ):
__lowercase = Node(UpperCAmelCase__ )
__lowercase = self.head
__lowercase = new_node
def _lowercase ( self : Dict, UpperCAmelCase__ : str, UpperCAmelCase__ : Tuple ):
if node_data_a == node_data_a:
return
else:
__lowercase = self.head
while node_a is not None and node_a.data != node_data_a:
__lowercase = node_a.next
__lowercase = self.head
while node_a is not None and node_a.data != node_data_a:
__lowercase = node_a.next
if node_a is None or node_a is None:
return
__lowercase ,__lowercase = node_a.data, node_a.data
if __name__ == "__main__":
_a = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 366 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
# NOTE: the model-specific class name did not survive in this copy; a generic
# name is used below. The logic follows the standard BaseImageProcessor template.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
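# Usage sketch (hypothetical, relying only on the defaults defined above):
#
#     import numpy as np
#     processor = ImageProcessor()
#     image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     # shortest edge resized to 256, center-cropped to 224x224, channels-first:
#     assert batch["pixel_values"].shape == (1, 3, 224, 224)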
| 144 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''openbmb/cpm-ant-10b''': 1_024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedily match the longest substring present in the vocabulary.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Remap the special space/newline tokens onto their literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then refine each chunk with the WordPiece vocabulary."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the special space/newline tokens before writing the file.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
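# Usage sketch (requires the `jieba` backend; the checkpoint id comes from the
# pretrained map above, and the round-trip shown is only an illustration):
#
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer("今天天气真好!")["input_ids"]
#     print(tokenizer.decode(ids))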
| 60 |
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
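# Worked example for shift_tokens_right (illustrative values): with
# pad_token_id=0 and decoder_start_token_id=2, [[5, -100, 7]] becomes
# [[2, 5, 0]]: every id moves one slot to the right, the first slot gets the
# decoder start token, and the -100 label sentinel is replaced by the pad id.
#
#     shifted = shift_tokens_right(jnp.array([[5, -100, 7]]), pad_token_id=0, decoder_start_token_id=2)
#     assert shifted.tolist() == [[2, 5, 0]]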
| 32 | 0 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the loss with respect to each weight matrix, computed
        # by the chain rule through the sigmoid activations.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
    example()
| 361 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 58 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each component.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
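
    # Small demonstration (example weights chosen here, not taken from the
    # original file): Boruvka's algorithm on a 4-node cycle should keep the
    # three cheapest edges, for a total MST weight of 1 + 2 + 3 = 6.
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(0, 3, 10)
    g.boruvka()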
| 17 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert that the created tokens are the same as the hard-coded ones."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        """Test the tokenizer downstream with a sample of the XNLI dataset."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 371 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 318 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ],
            outputs,
        )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
# This is highly irregular to have no small tests.
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
| 287 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
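# Usage sketch (standard PretrainedConfig behavior, shown for orientation):
#
#     config = CamembertConfig(num_hidden_layers=6)
#     assert config.model_type == "camembert" and config.num_hidden_layers == 6
#     onnx_config = CamembertOnnxConfig(config)
#     assert "input_ids" in onnx_config.inputs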
| 287 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        # Halfway through, round-trip the scheduler state through disk.
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[Any] =nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase : Tuple =AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase : Optional[int] =10
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ )
    def test_schedulers ( self ):
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper :
'''simple docstring'''
    def __init__( self , fn ):
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler ( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
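# Why the wrapper above helps: LambdaLR only serializes lr lambdas that are
# callable *objects* (their __dict__ is saved), so wrapping each schedule
# function makes the torch.save round-trip in the test above possible. A
# self-contained sketch of the same idea (toy names, assumed behavior):
import functools
import pickle
def _scaled_lr ( step , factor ):
    return factor * step
_wrapped = LambdaScheduleWrapper(functools.partial(_scaled_lr , factor=0.5 ) )
assert pickle.loads(pickle.dumps(_wrapped ) )(4 ) == 2.0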
| 252 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE : Dict = 16
SCREAMING_SNAKE_CASE : str = 32
def bamb ( x ) -> Any:
    '''simple docstring'''
    return int(x / 2**2_0 )
class TorchTracemalloc :
'''simple docstring'''
def __enter__( self ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
return self
def __exit__( self , *UpperCamelCase_ ):
gc.collect()
torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders ( accelerator , batch_size = 1_6 , model_name = "bert-base-cased" , n_train = 3_2_0 , n_val = 1_6_0 , ) -> Optional[Any]:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        '''glue''' , '''mrpc''' , split={'''train''': f"train[:{n_train}]", '''validation''': f"validation[:{n_val}]"} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function ( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
# Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
with TorchTracemalloc() as tracemalloc:
model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
            json.dump(train_total_peak_memory , f )
def main ( ) -> Union[str, Any]:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--peak_memory_upper_bound''' , type=float , default=None , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
    parser.add_argument(
        '''--n_train''' , type=int , default=3_2_0 , help='''Number of training examples to use.''' , )
    parser.add_argument(
        '''--n_val''' , type=int , default=1_6_0 , help='''Number of validation examples to use.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
| 252 | 1 |
def twos_complement ( number: int ) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError('input must be a negative integer' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 1_0
lowercase_ = 2_5_6
def get_min_hash ( tokens: List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens ( code: str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex :
'''simple docstring'''
    def __init__( self , *,
        duplication_jaccard_threshold = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add ( self , code_key , min_hash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters ( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save ( self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , '''w''' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator: Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters ( dataset_iterator: Type[Dataset] , jaccard_threshold: float ) -> Dict:
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity ( code1: str , code2: str ) -> float:
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ) -> Any:
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ) -> Optional[int]:
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset ( dataset: Type[Dataset] , jaccard_threshold: float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'''Original dataset size: {len(dataset )}''' )
    print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(f'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
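# A minimal sketch of the MinHash machinery this file builds on (datasketch is
# imported above): two token sets with true Jaccard 1/5 yield a noisy estimate
# close to 0.2 from their signatures.
def _minhash_demo ( ):
    m1, m2 = MinHash(num_perm=NUM_PERM ), MinHash(num_perm=NUM_PERM )
    for t in {"def", "foo", "return"}:
        m1.update(t.encode() )
    for t in {"def", "bar", "yield"}:
        m2.update(t.encode() )
    return m1.jaccard(m2 )  # noisy estimate of 1/5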
| 45 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    def __init__( self , list_of_points ) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function ( self , t ) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function ( self , t ) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve ( self , step_size = 0.01 ) -> None:
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_coordinates = [i[0] for i in self.list_of_points]
        y_coordinates = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
        plt.scatter(x_coordinates , y_coordinates , color='red' , label='Control Points' )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
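    # Hand-worked check of the degree-2 curve at t = 0.5: the Bernstein basis
    # is [0.25, 0.5, 0.25], so control points (0, 0), (5, 5), (5, 0) give
    # (0*0.25 + 5*0.5 + 5*0.25, 0*0.25 + 5*0.5 + 0*0.25) = (3.75, 2.5).
    assert BezierCurve([(0, 0), (5, 5), (5, 0)] ).bezier_curve_function(0.5 ) == (3.75, 2.5)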
| 197 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
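# The pattern above in miniature: defer an import until first attribute access
# (PEP 562 module __getattr__, which _LazyModule builds on). A runnable toy,
# not how transformers implements it exactly:
import importlib
import types
def _make_lazy ( name ):
    mod = types.ModuleType(name )
    def __getattr__(attr ):
        real = importlib.import_module(name )
        return getattr(real , attr )
    mod.__getattr__ = __getattr__
    return mod
_lazy_json = _make_lazy("json" )
assert _lazy_json.dumps({"a": 1} ) == '{"a": 1}'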
| 197 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """altclip_text_model"""
    def __init__( self , vocab_size=250_002 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.0_2 , initializer_factor=0.0_2 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=768 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """altclip_vision_model"""
    def __init__( self , hidden_size=768 , intermediate_size=3_072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.0_2 , initializer_factor=1.0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("""model_type""" ) == "altclip":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """altclip"""
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=768 , logit_scale_init_value=2.6_5_9_2 , **kwargs ) -> None:
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("""text_config_dict""" , None )
        vision_config_dict = kwargs.pop("""vision_config_dict""" , None )
        super().__init__(**kwargs )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
                            f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
                            f"""value `text_config[\"{key}\"]` will be overriden."""
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key ): value for key, value in _vision_config_dict["""id2label"""].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
                            f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
                            f"""The value `vision_config[\"{key}\"]` will be overriden."""
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs ( cls , text_config: AltCLIPTextConfig , vision_config: AltCLIPVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict ( self ) -> Dict:
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
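# Quick composition sketch using the classes above (illustrative; this module
# relies on relative imports, so exercise it from inside the package):
#     text_cfg = AltCLIPTextConfig()
#     vision_cfg = AltCLIPVisionConfig()
#     joint = AltCLIPConfig.from_text_vision_configs(text_cfg , vision_cfg )
#     joint.projection_dim  # -> 768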
| 66 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ) -> Union[str, Any]:
    """simple docstring"""
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch ( input_ids , pad_token_id , attention_mask=None , ) -> List[Any]:
    """simple docstring"""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
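# Worked example of trim_batch: with pad_token_id 0 and the batch
# [[5, 4, 0], [6, 0, 0]], column 2 is all-pad, so it is dropped and the
# result has shape (2, 2).
def _trim_batch_demo ( ):
    batch = torch.tensor([[5, 4, 0], [6, 0, 0]] )
    trimmed = trim_batch(batch , pad_token_id=0 )
    assert trimmed.shape == (2, 2)
    return trimmed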
class Seq2SeqDataset (Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '.source' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '.target' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
    def __getitem__( self , index ):
        '''simple docstring'''
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('\n' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('\n' )
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , 'right' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , 'right' )
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
@staticmethod
    def get_char_lens ( data_file ):
        '''simple docstring'''
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn ( self , batch ):
        '''simple docstring'''
        input_ids = torch.stack([x['input_ids'] for x in batch] )
        masks = torch.stack([x['attention_mask'] for x in batch] )
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
__snake_case = getLogger(__name__)
def flatten_list ( summary_ids ) -> Any:
    """simple docstring"""
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info ( folder_path ) -> None:
    """simple docstring"""
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json ( content , path , indent=4 , **json_dump_kwargs ) -> List[Any]:
    """simple docstring"""
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json ( path ) -> Union[str, Any]:
    """simple docstring"""
    with open(path ) as f:
        return json.load(f )
def get_git_info ( ) -> Dict:
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap ( f , x ) -> List:
    """simple docstring"""
    return list(map(f , x ) )
def pickle_save ( obj , path ) -> Tuple:
    """simple docstring"""
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def normalize_answer ( s ) -> List[Any]:
    """simple docstring"""
    def remove_articles(text ):
        return re.sub(r'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score ( prediction , ground_truth ) -> int:
    """simple docstring"""
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
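# Worked example of the token-level F1 above: "hello there" vs "hello world"
# share one token, so precision = recall = 1/2 and
# F1 = 2 * 0.5 * 0.5 / (0.5 + 0.5) = 0.5.
assert f1_score("hello there" , "hello world" ) == 0.5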
def exact_match_score ( prediction , ground_truth ) -> Any:
    """simple docstring"""
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match ( output_lns , reference_lns ) -> Dict:
    """simple docstring"""
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model ( model_prefix ) -> Optional[Any]:
    """simple docstring"""
    return model_prefix.startswith('rag' )
def set_extra_model_params ( extra_params , hparams , config ) -> Dict:
    """simple docstring"""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 310 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader ( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__(self , df: pyspark.sql.DataFrame , split: Optional[NamedSplit] = None , features: Optional[Features] = None , streaming: bool = True , cache_dir: str = None , keep_in_memory: bool = False , working_dir: str = None , load_from_cache_file: bool = True , file_format: str = "arrow" , **kwargs , ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read (self ):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
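# Usage sketch (hypothetical DataFrame `df`; recent `datasets` releases expose
# the same path as Dataset.from_spark(df)):
#     reader = SparkDatasetReader(df , cache_dir="/tmp/hf_cache" )
#     ds = reader.read()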
| 356 |
'''simple docstring'''
import os
def solution ( ) -> str:
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
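    # The idea in miniature: Python ints are arbitrary precision, so summing
    # the full numbers and slicing the decimal string gives leading digits.
    assert str(sum([999, 999, 999] ) )[:2] == "29"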
| 337 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple ( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class UpperCamelCase__ :
def lowerCAmelCase (self : str , snake_case_ : str , snake_case_ : List[Any] ):
pass
def lowerCAmelCase (self : Dict ):
pass
def lowerCAmelCase (self : List[str] ):
pass
    def assert_almost_equals (self , a: np.ndarray , b: np.ndarray , tol: float ):
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase (self : int , snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : int=None , **snake_case_ : Optional[Any] ):
__a : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
__a : List[str] = FlaxVisionTextDualEncoderModel(snake_case_ )
__a : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase (self : List[Any] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any]=None , **snake_case_ : str ):
__a , __a : Union[str, Any] = self.get_vision_text_model(snake_case_ , snake_case_ )
__a : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
__a : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
__a : Union[str, Any] = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase (self : Any , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any]=None , **snake_case_ : Union[str, Any] ):
__a , __a : Optional[int] = self.get_vision_text_model(snake_case_ , snake_case_ )
__a : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
__a : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
__a : Optional[int] = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
__a : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__a : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case_ )
__a : Tuple = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
__a : List[Any] = after_output[0]
__a : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-3 )
def lowerCAmelCase (self : int , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : List[Any]=None , **snake_case_ : Dict ):
__a , __a : Union[str, Any] = self.get_vision_text_model(snake_case_ , snake_case_ )
__a : Dict = {'''vision_model''': vision_model, '''text_model''': text_model}
__a : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
__a : str = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
__a : Dict = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__a : int = to_atuple(vision_model.config.image_size )
__a : Tuple = to_atuple(vision_model.config.patch_size )
__a : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__a : Optional[int] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__a : Tuple = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase (self : Dict , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Any ):
pt_model.to(snake_case_ )
pt_model.eval()
# prepare inputs
__a : int = inputs_dict
__a : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__a : Optional[int] = pt_model(**snake_case_ ).to_tuple()
__a : Any = fx_model(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case_ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case_ )
__a : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case_ , from_pt=snake_case_ )
__a : Any = fx_model_loaded(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case_ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case_ )
__a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(snake_case_ , from_flax=snake_case_ )
pt_model_loaded.to(snake_case_ )
pt_model_loaded.eval()
with torch.no_grad():
__a : str = pt_model_loaded(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case_ , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase (self : List[str] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Optional[Any] ):
__a : Any = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
__a : Union[str, Any] = VisionTextDualEncoderModel(snake_case_ )
__a : Tuple = FlaxVisionTextDualEncoderModel(snake_case_ )
__a : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case_ )
__a : Optional[int] = fx_state
self.check_pt_flax_equivalence(snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase (self : Any , snake_case_ : str , snake_case_ : Any , snake_case_ : int ):
__a : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
__a : List[str] = VisionTextDualEncoderModel(snake_case_ )
__a : Optional[Any] = FlaxVisionTextDualEncoderModel(snake_case_ )
__a : List[Any] = load_flax_weights_in_pytorch_model(snake_case_ , fx_model.params )
self.check_pt_flax_equivalence(snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase (self : Union[str, Any] ):
__a : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case_ )
def lowerCAmelCase (self : Optional[Any] ):
__a : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ )
def lowerCAmelCase (self : int ):
__a : str = self.prepare_config_and_inputs()
self.check_save_load(**snake_case_ )
def lowerCAmelCase (self : int ):
__a : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case_ )
@is_pt_flax_cross_test
def lowerCAmelCase (self : Tuple ):
__a : str = self.prepare_config_and_inputs()
__a : str = config_inputs_dict.pop('''vision_config''' )
__a : Union[str, Any] = config_inputs_dict.pop('''text_config''' )
__a : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case_ , snake_case_ , snake_case_ )
self.check_equivalence_flax_to_pt(snake_case_ , snake_case_ , snake_case_ )
@slow
def lowerCAmelCase (self : Tuple ):
__a , __a : Union[str, Any] = self.get_pretrained_model_and_inputs()
__a : Optional[Any] = model_a(**snake_case_ )
__a : Union[str, Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case_ )
__a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case_ )
__a : Tuple = model_a(**snake_case_ )
__a : int = after_outputs[0]
__a : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
@require_flax
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
def lowerCAmelCase (self : List[str] ):
__a : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=snake_case_ , text_from_pt=snake_case_ , )
__a : List[str] = 1_3
__a : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__a : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__a : Any = random_attention_mask([batch_size, 4] )
__a : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowerCAmelCase (self : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Any ):
__a : int = FlaxViTModel(snake_case_ )
__a : str = FlaxBertModel(snake_case_ )
return vision_model, text_model
def lowerCAmelCase (self : List[Any] ):
__a : Tuple = FlaxViTModelTester(self )
__a : int = FlaxBertModelTester(self )
__a : Union[str, Any] = vit_model_tester.prepare_config_and_inputs()
__a : List[str] = bert_model_tester.prepare_config_and_inputs()
__a , __a : Tuple = vision_config_and_inputs
__a , __a , __a , __a : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
def lowerCAmelCase (self : List[Any] ):
__a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=snake_case_ , text_from_pt=snake_case_ , )
__a : int = 1_3
__a : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__a : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__a : Optional[int] = random_attention_mask([batch_size, 4] )
__a : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowerCAmelCase (self : Optional[int] , snake_case_ : str , snake_case_ : str ):
__a : Union[str, Any] = FlaxCLIPVisionModel(snake_case_ )
__a : List[Any] = FlaxBertModel(snake_case_ )
return vision_model, text_model
def lowerCAmelCase (self : Dict ):
__a : Optional[int] = FlaxCLIPVisionModelTester(self )
__a : Union[str, Any] = FlaxBertModelTester(self )
__a : Any = clip_model_tester.prepare_config_and_inputs()
__a : List[Any] = bert_model_tester.prepare_config_and_inputs()
__a , __a : List[Any] = vision_config_and_inputs
__a , __a , __a , __a : Optional[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@slow
    def test_inference (self ):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=image , padding=True , return_tensors='''np''' )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) )
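# What the dual encoder computes, in a standalone NumPy sketch: L2-normalize
# text and image embeddings, then scale their dot products by logit_scale to
# get logits_per_text (its transpose is logits_per_image).
def _dual_encoder_logits_sketch ( ):
    rng = np.random.default_rng(0 )
    text_embeds = rng.normal(size=(2, 8) )
    image_embeds = rng.normal(size=(1, 8) )
    text_embeds /= np.linalg.norm(text_embeds , axis=-1 , keepdims=True )
    image_embeds /= np.linalg.norm(image_embeds , axis=-1 , keepdims=True )
    logits_per_text = 1.0 * text_embeds @ image_embeds.T  # (2, 1)
    return logits_per_text.T  # logits_per_image, shape (1, 2)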
| 216 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir ( data_dir , save_dir: str , model_name: str , bs: int = 8 , max_source_length: int = 1_0_2_4 , type_path="val" , n_obs=None , fp16=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs: Dict = None , prefix="" , **generate_kwargs , ) -> Dict:
    '''simple docstring'''
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend='''nccl''' , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop('''num_beams''' , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    ds = Seq2SeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1_0_2_4 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({'''pred''': pred, '''id''': ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
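# chunks() (imported from utils above) is assumed to regroup the flat list of
# num_return_sequences predictions per example; a minimal reference sketch:
#     def chunks(lst , n ):
#         for i in range(0 , len(lst ) , n ):
#             yield lst[i : i + n]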
def main ( ):
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=lowerCAmelCase__ , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=lowerCAmelCase__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=lowerCAmelCase__ , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument(
'''--type_path''' , type=lowerCAmelCase__ , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' , type=lowerCAmelCase__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=lowerCAmelCase__ , default=8 , required=lowerCAmelCase__ , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=lowerCAmelCase__ , default=6_0_0 , required=lowerCAmelCase__ , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument('''--tgt_lang''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument(
'''--prefix''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
__a : int = time.time()
__a , __a : Tuple = parser.parse_known_args()
__a : Optional[int] = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase__ )
if generate_kwargs and args.local_rank <= 0:
print(f"parsed the following generate kwargs: {generate_kwargs}" )
__a : Union[str, Any] = Path(args.save_dir + '''_tmp''' )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) # this handles locking.
__a : Dict = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
__a : Optional[Any] = {}
if args.src_lang is not None:
__a : int = args.src_lang
if args.tgt_lang is not None:
__a : Optional[Any] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase__ )
__a , __a : Tuple = eval_data_dir(
args.data_dir , lowerCAmelCase__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ , )
if args.local_rank <= 0:
__a : int = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
__a : List[str] = gather_results_from_each_node(lowerCAmelCase__ , lowerCAmelCase__ , args.sync_timeout )
__a : int = combine_partial_results(lowerCAmelCase__ )
if args.num_return_sequences > 1:
__a : List[Any] = save_dir.joinpath('''pseudolabel_results.json''' )
print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase__ , lowerCAmelCase__ )
return
__a : Any = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(lowerCAmelCase__ ) as f:
__a : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase__ )]
# Calculate metrics, save metrics, and save _generations.txt
__a : str = '''translation''' in args.task
__a : List[str] = calculate_bleu if calc_bleu else calculate_rouge
__a : Any = '''bleu''' if calc_bleu else '''rouge'''
__a : Dict = score_fn(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Dict = len(lowerCAmelCase__ )
__a : str = time.time() - start_time
__a : List[str] = round(runtime / metrics['''n_obs'''] , 4 )
__a : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
__a : Optional[int] = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase__ , lowerCAmelCase__ , indent=lowerCAmelCase__ )
print(lowerCAmelCase__ )
write_txt_file(lowerCAmelCase__ , save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase__ , save_dir.joinpath(f"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase__ )
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
    # Usage for MT:
    # python run_distributed_eval.py --model_name sshleifer/distilbart-large-xsum-12-3 --save_dir xsum_generations --data_dir xsum --fp16
    run_generate()
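# Illustrative launch sketch (not part of the original script): each GPU gets one process
# and receives its rank via --local_rank, so a typical multi-GPU run looks something like
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir xsum_generations --bs 16 --fp16
# The GPU count and data paths above are assumptions for the example.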
| 216 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Raise the error string that `find_executable_batch_size` interprets as a CUDA OOM."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit_output(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
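# Usage sketch (illustrative, not one of the tests above): in real training code the
# decorator retries the wrapped function with a halved batch size after every CUDA OOM,
# and the function is called *without* the batch_size argument:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build the dataloader with `batch_size` and run the epoch
#
#   training_loop()  # the decorator injects batch_size, starting at 128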
| 169 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """
    Malus's law: the intensity transmitted through a polarizer is
    I = I0 * cos^2(theta), where theta is the angle between the light's
    polarization direction and the polarizer's transmission axis.
    """
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
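# Quick sanity checks (approximate, since cos is computed in floating point):
#   malus_law(100, 0)  -> 100.0  parallel polarizers transmit everything
#   malus_law(100, 60) -> ~25.0  because cos(60 degrees)^2 = 0.25
#   malus_law(100, 90) -> ~0.0   crossed polarizers block the beam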
| 169 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
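# Usage sketch (illustrative): a weak-supervision setup in the style the hyperparameters
# above describe. `TapasForQuestionAnswering` is the matching transformers model class;
# the particular argument values here are just an example.
#   from transformers import TapasForQuestionAnswering
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   model = TapasForQuestionAnswering(config)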
| 32 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
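# Note: with this `_LazyModule` pattern the package namespace is a lightweight proxy, so
# `from transformers.models.trocr import TrOCRForCausalLM` only triggers the heavy torch
# import the first time the attribute is actually resolved.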
| 349 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_2 - output_3)) > 1e-3
        assert np.sum(np.abs(output_3 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )

        assert np.abs(expected_image - image).max() < 9e-2
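# Inference sketch mirroring the slow test above (the model IDs are the ones the test
# uses; everything else is a plain img2img-with-ControlNet call and is illustrative):
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
#   )
#   pipe.enable_model_cpu_offload()
#   result = pipe("evil space-punk bird", image, control_image=canny_image, strength=0.6)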
| 351 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """
    Apply X (NOT) gates to two qubits, then measure them.

    >>> single_qubit_measure(2, 2)
    {'11': 1000}
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
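# Both qubits are flipped from |0> to |1> by the X gates and the simulation is noise-free,
# so all 1000 shots land in state '11' and the printed histogram is {'11': 1000}.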
| 68 | 0 |
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number, i.e. the nth positive integer whose only prime
    factors are 2, 3 and 5 (the sequence starts 1, 2, 3, 4, 5, 6, 8, ...)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
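# The three-pointer merge appends each ugly number exactly once in sorted order, so
# ugly_numbers(n) runs in O(n) time and space. The sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... hence ugly_numbers(10) == 12.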
| 204 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, do_resize=True, size=None, size_divisor=32, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, do_center_crop=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_pad=True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3, ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
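# Note on the expected-size math used above: the short side is scaled to `shortest_edge`,
# the long side is capped at (1333 / 800) * shortest_edge, and both dimensions are then
# floored to a multiple of `size_divisor`, which is why every asserted shape is divisible
# by 32.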
| 342 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
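# Typical invocations (flags as defined above; run from the repository root):
#   python utils/release.py                 # prepare a minor release (x.y+1.0)
#   python utils/release.py --patch         # prepare a patch release (x.y.z+1)
#   python utils/release.py --post_release  # move the branch back to a .dev0 version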
| 186 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
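# The WordPiece cases above rely on greedy longest-match-first segmentation: "unwanted"
# becomes "un", "##want", "##ed" because "un" is the longest matching vocab prefix, while
# "unwantedX" maps to "[UNK]" since WordPiece marks the whole word unknown when any piece
# is missing from the vocabulary.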
| 301 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.

    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
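# Worked example for the test case above (all processes arrive at t=0): the scheduler
# always picks the shortest remaining job, so the execution order is
# P1(2), P3(3), P2(5), P4(7), giving waiting times [0, 5, 2, 10] and turnaround times
# [2, 10, 5, 17]; both averages printed above follow directly from these lists.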
| 301 | 1 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
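# The three variants above assert the same contract for numpy, torch and tensorflow:
# once the inputs have equal length, BatchFeature materializes them as a dense
# (batch_size, seq_len, feature_size) tensor in whichever backend `tensor_type` names.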
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np")
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_2[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
snake_case_ = feat_extract.pad(_UpperCAmelCase , pad_to_multiple_of=10 )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , pad_to_multiple_of=10 )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase , return_tensors='''np''' , )
snake_case_ = input_a[input_name]
self.assertTrue(all(len(_UpperCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
snake_case_ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True, )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np", )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True, )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 267 |
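# ---------------------------------------------------------------------------
# Usage sketch for the `pad` behaviour exercised by the mixin above. This is
# illustrative only: it assumes `transformers` provides a concrete extractor
# such as Wav2Vec2FeatureExtractor, and the lengths/sampling rate are made up.
import numpy as np

from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
speech_inputs = [np.zeros(800), np.zeros(1000), np.zeros(1200)]

# pad to the longest sequence in the batch and return a dense numpy array
batch = extractor.pad({"input_values": speech_inputs}, padding="longest", return_tensors="np")
assert batch["input_values"].shape == (3, 1200)

# pad/truncate every sequence to a fixed length
batch = extractor.pad(
    {"input_values": speech_inputs},
    padding="max_length",
    max_length=1000,
    truncation=True,
    return_tensors="np",
)
assert batch["input_values"].shape == (3, 1000)
# ---------------------------------------------------------------------------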
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # Compatibility Ideographs Supplement
    ):
        return True
    return False
def is_chinese(word):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """simple docstring"""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
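# Hedged mini-example of `add_sub_symbol` (toy data, not from a real corpus): the
# segmenter found the whole word "北京", so its second character is re-marked as a
# "##" continuation piece, letting whole-word masking treat both tokens as one unit.
assert add_sub_symbol(["我", "爱", "北", "京"], {"北京"}) == ["我", "爱", "北", "##京"]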
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """simple docstring"""
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with "##", which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
    main(args)
| 267 | 1 |
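# Example invocation of the script above (the script file name is illustrative;
# the defaults baked into the argparse block are used when flags are omitted):
#
#   python prepare_chinese_ref.py \
#       --file_name=./resources/chinese-demo.txt \
#       --ltp=./resources/ltp \
#       --bert=./resources/robert \
#       --save_path=./resources/ref.txt
#
# Each output line is a JSON list with the positions of "##"-continuation Chinese
# subwords, which a whole-word-masking data collator later consumes.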
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    slow_tokenizer_class = None
    tokenizer_class = BloomTokenizerFast
    rust_tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence-length constraint. The parent-class test would fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
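# Hedged usage sketch for the fast-only BLOOM tokenizer covered above (requires
# downloading the "bigscience/tokenizer" checkpoint; sentences are illustrative):
#
#   from transformers import BloomTokenizerFast
#
#   tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
#   batch = tok(["The quick brown fox", "jumps over the lazy dog"], padding=True, return_tensors="np")
#   print(batch["input_ids"].shape, batch["attention_mask"].shape)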
| 43 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase : List[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    '''simple docstring'''
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
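# Minimal numpy sketch of what `shift_tokens_right` does (an assumption that
# mirrors the behaviour checked by test_shift_tokens_right above; the real
# implementation lives in modeling_flax_blenderbot):
def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # any -100 labels are mapped back to the pad token id
    return np.where(shifted == -100, pad_token_id, shifted)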
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    '''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != '''cpu''', '''3B test too slow on CPU.''' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
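# Sketch of the incremental (cached) decoding pattern that check_use_cache_forward
# validates above — illustrative pseudo-usage only, with `model` standing for any
# Flax seq2seq model from this file:
#
#   encoder_outputs = model.encode(input_ids)
#   past = model.init_cache(batch_size, max_decoder_length, encoder_outputs)
#   next_token = jnp.full((batch_size, 1), decoder_start_token_id, dtype="i4")
#   for step in range(max_decoder_length):
#       position_ids = jnp.full((batch_size, 1), step, dtype="i4")
#       out = model.decode(next_token, encoder_outputs, past_key_values=past,
#                          decoder_position_ids=position_ids)
#       past = out.past_key_values
#       next_token = out.logits[:, -1:].argmax(-1)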
| 238 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_UpperCAmelCase : int = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        """simple docstring"""
        super().__init__()

        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(_UpperCAmelCase)  # `_UpperCAmelCase` holds the example docstring defined above
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """simple docstring"""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
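# Minimal sketch of the classifier-free-guidance update used in the denoising
# loop above (illustrative helper, not part of the diffusers API): `noise_pred`
# stacks the unconditional and conditional predictions along the batch axis,
# mirroring the duplicated latents.
def _cfg_step_sketch(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    noise_uncond, noise_text = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)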
| 371 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
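# Hedged sketch of the parsing idea behind `find_backend` (the real helper lives
# in utils/check_dummies.py; this regex-based version is illustrative only):
import re

_re_backend_sketch = re.compile(r"is\_([a-z_]*)_available\(\)")


def _find_backend_sketch(line):
    backends = _re_backend_sketch.findall(line)
    return "_and_".join(backends) if backends else None


assert _find_backend_sketch(" if not is_torch_available():") == "torch"
assert (
    _find_backend_sketch(" if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)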
| 110 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
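# Hedged sketch of the lazy-import mechanism `_LazyModule` implements (PEP 562
# module-level __getattr__; illustrative, not the transformers internals):
#
#   import importlib
#
#   def __getattr__(name):
#       for module_name, exported in _import_structure.items():
#           if name in exported:
#               module = importlib.import_module("." + module_name, __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")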
| 253 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
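# Hedged usage sketch (dataset name is illustrative; needs the full `datasets`
# library and a download at runtime):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("imdb", split="train")
#   ds = ds.prepare_for_task("text-classification")
#   # columns are now aligned to the schema above: {"text": ..., "labels": ...}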
| 253 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
        from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        """simple docstring"""

        def __init__(self, tokenizer):
            """simple docstring"""
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            """simple docstring"""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        """simple docstring"""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def __lowercase ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__UpperCamelCase : List[str] =tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCamelCase : Optional[int] =tf_tokenizer(UpperCamelCase_ ) # Build model with some sample inputs
__UpperCamelCase : List[Any] =tf_tokenizer.get_config()
__UpperCamelCase : str =TFGPTaTokenizer.from_config(UpperCamelCase_ )
__UpperCamelCase : str =model_from_config(UpperCamelCase_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def __lowercase ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__UpperCamelCase : List[Any] =123123
for max_length in [3, 5, 1024]:
__UpperCamelCase : Any =tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCamelCase : Optional[int] =tf_tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ )
__UpperCamelCase : Union[str, Any] =out['input_ids'].numpy().shape[1]
assert out_length == max_length
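

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original test file; the checkpoint name and the
# __main__ guard are illustrative assumptions). It shows the pattern the tests
# above exercise: TFGPT2Tokenizer consumes raw string tensors fully in-graph,
# which is what makes it exportable inside a SavedModel.
# ---------------------------------------------------------------------------
if __name__ == "__main__" and is_tf_available():
    demo_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    demo_outputs = demo_tokenizer(tf.constant(["TF tokenizers run inside the graph."]))
    # "input_ids" may come back ragged; RaggedTensor.to_tensor() pads it dense
    print(demo_outputs["input_ids"])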
| 355 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    # Configuration for a Decision Transformer; the backbone hyperparameters
    # (n_layer, n_head, ...) mirror the GPT-2 architecture it is built on.
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
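

# ---------------------------------------------------------------------------
# Usage sketch (illustrative assumption, not in the original module; it assumes
# the class above is importable, e.g. when this file runs inside the package).
# The defaults target a Gym Hopper-style setup: 17-dim states, 4-dim actions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = DecisionTransformerConfig(state_dim=17, act_dim=4, n_layer=6)
    # attribute_map lets the generic names resolve to the GPT-2 style fields
    print(config.num_hidden_layers, config.max_position_embeddings)  # 6 1024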
| 245 | 0 |