| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1 |
"""Convert MobileNetV1 checkpoints from the tensorflow/models library."""

import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
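
# Illustrative invocation (script name, checkpoint path, and output folder are
# examples, not taken from the original file):
#
#   python convert_mobilenet_v1_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf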
| 245 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclid's algorithm: returns (x, y) with a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    For coprime n1, n2, find the unique n (mod n1*n2) with
    n % n1 == r1 and n % n2 == r2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same result as chinese_remainder_theorem, built on modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 54 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
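
# Illustrative usage (not part of the original file): like any PretrainedConfig
# subclass, overrides are passed as keyword arguments, and the check above ties
# the kernel list to the declared number of conv layers.
#
#   config = MCTCTConfig(num_conv_layers=2, conv_kernel=(7, 7), conv_stride=(3, 3))
#   assert len(config.conv_kernel) == config.num_conv_layers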
| 322 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local",
    )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        # Shard filenames encode the per-shard sample count, e.g. dataset-0-2048.tfrecord
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
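
# Illustrative invocation (bucket paths are examples, not from the original file):
#
#   python run_mlm.py \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --output_dir ./mlm_checkpoints \
#       --bfloat16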
| 54 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase = 50 ):
"""simple docstring"""
_lowercase: Any = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
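
# These tests are collected by pytest like the rest of the pipeline suite, e.g.
# (illustrative command, path assumed from the repository layout):
#
#   python -m pytest tests/pipelines/test_pipelines_visual_question_answering.py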
| 54 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
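
# Illustrative usage (not part of the original file): requesting backbone
# features by stage name lets the helper fill in the aligned indices, since
# stage_names is ["stem", "stage1", ..., "stage4"] for the default depths.
#
#   config = DinatConfig(out_features=["stage2", "stage4"])
#   # config.out_indices -> positions of those names in stage_names, i.e. 2 and 4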
| 68 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Fractional knapsack: greedily take the items with the highest
    profit-per-weight ratio first."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # pick the greatest remaining element of sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark the slot as used

        # check if the weight encountered is less than the total weight
        # remaining.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # The whole item fits, so the full profit is gained
            # (weight[index] / weight[index] == 1).
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the remaining limit,
            # take only the required fraction of the item:
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
__lowercase : List[str] =[int(x) for x in input("""Input profits separated by spaces: """).split()]
__lowercase : Union[str, Any] =[int(x) for x in input("""Input weights separated by spaces: """).split()]
__lowercase : Tuple =int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
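
# Hand-worked example (not from the original file):
#   calc_profit([1, 2, 3], [3, 4, 5], 15) -> 6
# All three items fit (3 + 4 + 5 = 12 <= 15 kg), so the whole profit 1 + 2 + 3
# is gained; with a smaller max_weight the last item taken contributes fractionally.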
| 54 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE__ : List[Any] = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
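
# Illustrative invocation (script name and output path are examples; a CUDA
# device is required, since the model is moved to "cuda" above):
#
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam_vit_b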
| 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
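
# Note (not part of the original file): registering a _LazyModule keeps
# `import transformers` cheap. For example, `from transformers.models.blip
# import BlipProcessor` only triggers the import of processing_blip; the heavy
# torch/TF modeling files are loaded on first attribute access.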
| 54 | 0 |
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish) expression over integers.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero, like C,
                # rather than flooring like Python's //.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
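
# Hand-worked trace (not from the original file): the tokens
# ["4", "13", "5", "/", "+"] encode 4 + (13 / 5). Division truncates toward
# zero, so the stack evolves [4] -> [4, 13] -> [4, 13, 5] -> [4, 2] -> [6].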
| 604 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
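
# Illustrative invocation via fire (assumes the seq2seq examples data layout,
# i.e. train.source/train.target and val.source/val.target files in data_dir):
#
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./xsum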
| 54 | 0 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Dict = get_edges(lowercase__ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : int = edges.pop()
chosen_vertices.add(lowercase__ )
chosen_vertices.add(lowercase__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase__ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 624 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 54 | 0 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__lowerCAmelCase : str ="""
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
__lowerCAmelCase : Optional[Any] ="""
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
__lowerCAmelCase : Dict =R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 359 |
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 54 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase (__lowercase ):
def __init__( self : int , *__UpperCAmelCase : List[Any] , __UpperCAmelCase : Any=None , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : Any ) -> List[str]:
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = eval_examples
SCREAMING_SNAKE_CASE__ = post_process_function
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Optional[Dataset] = None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : str = "eval" , **__UpperCAmelCase : Any , ) -> Dict[str, float]:
SCREAMING_SNAKE_CASE__ = gen_kwargs.copy()
SCREAMING_SNAKE_CASE__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
SCREAMING_SNAKE_CASE__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
SCREAMING_SNAKE_CASE__ = gen_kwargs
SCREAMING_SNAKE_CASE__ = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE__ = self.get_eval_dataloader(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE__ = self.compute_metrics
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = time.time()
SCREAMING_SNAKE_CASE__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE__ = eval_loop(
_lowerCAmelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
SCREAMING_SNAKE_CASE__ = compute_metrics
SCREAMING_SNAKE_CASE__ = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE__ = self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
SCREAMING_SNAKE_CASE__ = metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCAmelCase )
return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" , **gen_kwargs ):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"""{metric_key_prefix}_""" ):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
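# A minimal, self-contained sketch of the speed-metrics computation merged into
# output.metrics above: from a wall-clock start time, a sample count and a step
# count it derives "<prefix>_runtime" and the per-second throughput entries.
# The key names follow the transformers convention; treat this as an
# illustration, not the library implementation itself.
import math
import time

def _demo_speed_metrics(prefix, start_time, num_samples, num_steps):
    runtime = time.time() - start_time
    result = {f"{prefix}_runtime": round(runtime, 4)}
    if runtime == 0:
        return result
    result[f"{prefix}_samples_per_second"] = round(num_samples / runtime, 3)
    result[f"{prefix}_steps_per_second"] = round(num_steps / runtime, 3)
    return result

# Example: 1000 samples in batches of 32 -> ceil(1000 / 32) = 32 steps.
_demo = _demo_speed_metrics("eval", time.time() - 2.0, 1000, math.ceil(1000 / 32))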
| 196 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    '''simple docstring'''
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(amount = 1_0_0 ):
    '''simple docstring'''
    return (generate_random_hand() for _ in range(amount ))
@pytest.mark.parametrize("hand, expected" , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    '''simple docstring'''
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , TEST_TYPES )
def test_hand_values(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    '''simple docstring'''
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    '''simple docstring'''
    pokerhands = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    '''simple docstring'''
    pokerhand = PokerHand("2C 4S AS 3D 5C" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
    for _ in range(1_0 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    '''simple docstring'''
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , "poker_hands.txt" )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
        assert answer == 3_7_6
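# Hedged usage sketch: exercising the PokerHand API the tests above rely on.
# It assumes only what the assertions already assume, namely a constructor
# taking a five-card string and a compare_with method returning "Win",
# "Loss" or "Tie".
def _demo_poker_hand():
    royal_flush = PokerHand("JH AH TH KH QH")
    two_pairs = PokerHand("3H 4C 4H 3S 2H")
    assert royal_flush.compare_with(two_pairs) == "Win"
    assert two_pairs.compare_with(royal_flush) == "Loss"
_demo_poker_hand()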
| 54 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    '''simple docstring'''
    parameter_file = os.path.join(args.tf_model_dir , 'parameters.json' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    if not args.output.endswith('.pt' ):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
    with tf.device('/CPU:0' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float32 )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
            if key_name.startswith('pasts/' ):
                if key_name.startswith('pasts/mlp' ):
                    player = int(key_name[9] )
                elif key_name.startswith('pasts/out' ):
                    player = 8
                name = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name.startswith('model/moe' ):
                player = int(key_name[9:].split('/' )[0] )
                if key_name.endswith('/switch_gating/kernel' ):
                    name = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
                    state = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('/softmlp/kernel' ):
                    name = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
                    state = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
                    nlayer = key_name[-9:-7]
                    for i in range(16 ):
                        name = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0] ).copy()
                        ) # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state )
            elif key_name.startswith('model/mlp' ):
                player = int(key_name[9:].split('/' )[0] )
                if key_name.endswith('/p1/kernel' ):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
                    state = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('/p1/bias' ):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
                    state = vnp.copy() # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('/p2/kernel' ):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
                    state = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('/p2/bias' ):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
                    state = vnp.copy() # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('model/ln' ):
                player = int(key_name[8:].split('/' )[0] )
                if key_name.endswith('/b' ):
                    name = 'model.blocks.%d.feed_forward.norm.bias' % player
                    state = vnp.copy() # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('/g' ):
                    name = 'model.blocks.%d.feed_forward.norm.weight' % player
                    state = vnp.copy() # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('model/att' ):
                player = int(key_name[9:].split('/' )[0] )
                if key_name.endswith('/qkv/kernel' ):
                    state = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    name = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
                    new_state[name] = torch.tensor(state_q )
                    name = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
                    new_state[name] = torch.tensor(state_k )
                    name = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
                    new_state[name] = torch.tensor(state_v )
                elif key_name.endswith('/o/kernel' ):
                    name = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('model/an' ):
                player = int(key_name[8:].split('/' )[0] )
                if key_name.endswith('/b' ):
                    name = 'model.blocks.%d.self_attn.norm.bias' % player
                    state = vnp.copy() # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('/g' ):
                    name = 'model.blocks.%d.self_attn.norm.weight' % player
                    state = vnp.copy() # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif (
                key_name.startswith('model/wte' )
                or key_name.startswith('model/wpe' )
                or key_name.startswith('model/ete' )
            ):
                nlayer = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
                    key_name[-3:]
                ]
                name = 'model.%s.weight' % nlayer
                state = vnp.copy() # same in embedded
                new_state[name] = torch.tensor(state )
                if key_name.startswith('model/wte' ):
                    name = 'lm_head.weight'
                    state = vnp.copy() # same in embedded
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('model/wob' ):
                name = 'final_logits_bias'
                state = vnp.copy() # same in embedded
                state = state.reshape((1, -1) )
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense/kernel":
                name = 'model.last_project.weight'
                state = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense_1/bias":
                name = 'model.last_project.bias'
                state = vnp.copy() # same because it is one dimensional
                new_state[name] = torch.tensor(state )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
    parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
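# Why the repeated vnp.transpose([1, 0]).copy() above: TF/Mesh-TensorFlow stores
# dense kernels as (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features), so each kernel is transposed (and made contiguous
# with .copy()) before becoming a state-dict tensor. A minimal sketch:
def _demo_kernel_transpose():
    kernel = np.arange(6, dtype=np.float32).reshape(2, 3)  # TF layout: (in=2, out=3)
    weight = torch.tensor(kernel.transpose([1, 0]).copy())  # torch layout: (out=3, in=2)
    assert weight.shape == (3, 2)
_demo_kernel_transpose()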
| 259 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , crop_pct: float = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , crop_pct: float , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , crop_pct: float = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
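# Hedged sketch of the resize rule implemented above: below 384px the image is
# resized to shortest_edge / crop_pct and then center-cropped back down to
# shortest_edge; at 384px and above it is warped directly with no crop.
def _demo_resize_rule(shortest_edge, crop_pct=224 / 256):
    if shortest_edge < 384:
        return {"resize_to": int(shortest_edge / crop_pct), "crop_to": shortest_edge}
    return {"resize_to": shortest_edge, "crop_to": None}

assert _demo_resize_rule(224) == {"resize_to": 256, "crop_to": 224}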
| 54 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase ):
    def tearDown( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
    def dummy_unet_condition( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        vqvae = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
        unet = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
    def test_audio_diffusion( self ) -> List[str]:
        '''simple docstring'''
        device = """cpu""" # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ) -> Dict:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion( self ) -> Union[str, Any]:
        '''simple docstring'''
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
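# The tests above fingerprint images by reading their first ten raw bytes via
# np.frombuffer(image.tobytes(), dtype="uint8"): an exact byte-level check on
# the top-left pixels. A hedged, PIL-free sketch of the same idea:
def _demo_image_fingerprint():
    pixels = np.arange(100, dtype=np.uint8).reshape(10, 10)
    fingerprint = np.frombuffer(pixels.tobytes(), dtype="uint8")[:10]
    assert np.array_equal(fingerprint, np.arange(10, dtype=np.uint8))
_demo_image_fingerprint()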
| 429 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results( test_results ):
    '''simple docstring'''
    expressions = test_results.split(" " )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure( failures_short_lines ):
    '''simple docstring'''
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n" ):
        if re.search(R"_ \[doctest\]" , line ):
            in_error = True
            failure = line.split(" " )[2]
        elif in_error and not line.split(" " )[0].isdigit():
            failures[failure] = line
            in_error = False
    return failures
class Message:
    def __init__( self , title , doc_test_results ) -> Tuple:
        '''simple docstring'''
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split("," )[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time( self ) -> str:
        '''simple docstring'''
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header( self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures( self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures( self ) -> Dict:
        '''simple docstring'''
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ""
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload( self ) -> str:
        '''simple docstring'''
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out( ) -> Union[str, Any]:
        '''simple docstring'''
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
        print(json.dumps({"blocks": payload} ) )
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=payload , )
    def post( self ) -> List[str]:
        '''simple docstring'''
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ) -> Optional[int]:
        '''simple docstring'''
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value ) > 250 else value
            failures_text += F'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply( self ) -> List[str]:
        '''simple docstring'''
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        job_link = self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                text = F'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print("Sending the following reply" )
                print(json.dumps({"blocks": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts["ts"] , )
                time.sleep(1 )
def get_job_links():
    '''simple docstring'''
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links." , e )
        return {}
def retrieve_artifact( name ):
    '''simple docstring'''
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
                    _artifact[file.split("." )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts():
    '''simple docstring'''
    class Artifact:
        def __init__( self , name ) -> Any:
            '''simple docstring'''
            self.name = name
            self.paths = []
        def __str__( self ) -> Tuple:
            '''simple docstring'''
            return self.name
        def add_path( self , path ) -> List[Any]:
            '''simple docstring'''
            self.paths.append({"name": self.name, "path": path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("""*.py""", """API Examples"""),
            ("""*.md""", """MD Examples"""),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            """failed""": [],
            """failures""": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("""run_doctests""")
    artifact_path = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
    artifact = retrieve_artifact(artifact_path["""name"""])
    if "stats" in artifact:
        failed , success , time_spent = handle_test_results(artifact["""stats"""])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + """, """
        all_failures = extract_first_line_failure(artifact["""failures_short"""])
        for line in artifact["summary_short"].split("""\n"""):
            if re.search("""FAILED""", line):
                line = line.replace("""FAILED """, """""")
                line = line.split()[0].replace("""\n""", """""")
                if "::" in line:
                    file_path , test = line.split("""::""")
                else:
                    file_path , test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else """N/A"""
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("""🤗 Results of the doc tests.""", doc_test_results)
    message.post()
    message.post_reply()
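# Hedged sketch of the input handle_test_results expects: a pytest summary line
# such as "=== 2 failed, 98 passed in 45.67s ===". The token before "failed" or
# "passed" holds the count, and the token before a trailing "===" is the time.
_demo_failed, _demo_success, _demo_time = handle_test_results("=== 2 failed, 98 passed in 45.67s ===")
assert (_demo_failed, _demo_success) == (2, 98)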
| 54 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ):
    config = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
        config.max_length = 1_28
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported" )
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 5_27
        filename = "audioset-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key( name ):
if "module.v" in name:
_lowercase : Tuple = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
_lowercase : Tuple = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
_lowercase : Dict = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
_lowercase : Union[str, Any] = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_lowercase : Any = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
_lowercase : int = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
_lowercase : Optional[int] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_lowercase : int = name.replace("attn" , "attention.self" )
if "norm1" in name:
_lowercase : List[str] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_lowercase : Any = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_lowercase : Optional[int] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_lowercase : Optional[int] = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
_lowercase : str = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
_lowercase : Any = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
return orig_state_dict
def remove_keys( state_dict ):
    ignore_keys = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 10_24 if "speech-commands" not in model_name else 1_28
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands" , "v0.02" , split="validation" )
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_60_00 , return_tensors="pt" )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_logits = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_logits = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_logits = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_logits = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_logits = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_logits = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_logits = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_logits = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(lowercase__ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
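# Hedged sketch of the fused-QKV split performed in convert_state_dict above:
# the original checkpoint stores query, key and value as one (3*dim, dim)
# matrix, and the converter slices it into three (dim, dim) blocks along the
# first axis.
def _demo_qkv_split(dim=4):
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
_demo_qkv_split()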
| 245 |
def solution( pence = 2_0_0 ):
    '''simple docstring'''
    coins = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1 # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
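# The table above implements the classic unbounded-knapsack recurrence:
# after processing coin c, ways[i] becomes ways[i] + ways[i - c], so each total
# is counted once per non-decreasing coin sequence. Tiny worked check: with
# coins {1, 2} there are exactly 3 ways to make 4p (1+1+1+1, 1+1+2, 2+2).
def _demo_ways(pence, coins):
    ways = [1] + [0] * pence
    for coin in coins:
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways[pence]

assert _demo_ways(4, [1, 2]) == 3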
| 54 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend( line ):
    '''simple docstring'''
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    '''simple docstring'''
    with open(os.path.join(PATH_TO_DIFFUSERS , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    '''simple docstring'''
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files( backend_specific_objects=None ):
    '''simple docstring'''
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(F'"{b}"' for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    '''simple docstring'''
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , '''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path , F'dummy_{short_names.get(backend , backend )}_objects.py' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F'diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '
                    '''to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
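# Hedged sketch of what create_dummy_object emits for each missing-backend name:
# an uppercase name becomes a None constant, a lowercase name a stub function,
# and anything else a DummyObject class that raises when used.
_demo_stub = create_dummy_object("UNet2DModel", '["torch"]')
assert "class UNet2DModel(metaclass=DummyObject):" in _demo_stub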
| 322 |
import sys
def matrix_chain_order( array ):
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optiomal_solution( optimal_solution , i , j ):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )
def main():
    '''simple docstring'''
    array = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
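
# Worked check of the recurrence above (illustrative, not part of the original file):
# with dimensions p = [30, 35, 15, 5] (A1: 30x35, A2: 35x15, A3: 15x5),
#     matrix[1][3] = min(matrix[1][1] + matrix[2][3] + 30*35*5,   # split after A1
#                        matrix[1][2] + matrix[3][3] + 30*15*5)   # split after A2
#                  = min(0 + 2625 + 5250, 15750 + 0 + 2250) = 7875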
| 54 | 0 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position, n):
    """simple docstring"""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board):
    """simple docstring"""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board, pos, curr):
    """simple docstring"""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n):
    """simple docstring"""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
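

# For example, open_knight_tour(5) returns a 5x5 board whose entries 1..25 trace one
# valid sequence of knight moves, while tiny boards such as n == 2 raise ValueError.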
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
from math import log2


def lowest_set_bit(a):
    '''simple docstring'''
    if not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    elif a < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if (a == 0) else int(log2(a & -a))
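

# Example: for a = 36 (0b100100), 36 & -36 == 4 (0b100) and log2(4) == 2, so the
# rightmost set bit sits at index 2; a == 0 maps to 0 by convention.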
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 | 0 |
def is_palindrome(head):
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
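

# The three checks above assume a singly linked node exposing `val` and `next`;
# the original file does not define one, so this is a minimal sketch of such a
# node plus a quick self-check (illustrative only).
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt


if __name__ == "__main__":
    # 1 -> 2 -> 2 -> 1 reads the same in both directions
    head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
    assert is_palindrome(head)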
| 68 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    '''simple docstring'''
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
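
# Illustrative invocation (the script name and checkpoint path are placeholders):
#     python convert_opt_checkpoint.py --fairseq_path /path/to/restored.pt \
#         --pytorch_dump_folder_path ./opt-hf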
| 54 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
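
# With the _LazyModule registration above, the tokenizer submodules are only
# imported on first attribute access instead of eagerly at package-import time.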
| 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    '''simple docstring'''
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    '''simple docstring'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
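

# Minimal round-trip sketch (assumes torch is available; not part of the original file):
#     import torch
#     latents = torch.rand(1, 3, 64, 64) * 2 - 1  # NCHW values in [-1, 1]
#     pil_images = pt_to_pil(latents)             # -> [<PIL.Image.Image 64x64>]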
| 54 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        '''simple docstring'''
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        '''simple docstring'''
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        '''simple docstring'''
        pass
| 604 |
def hamming(n_element):
    '''simple docstring'''
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
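

# The first terms produced by the three-pointer merge above are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...; every element has the form 2^i * 3^j * 5^k.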
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 0 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors):
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors):
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
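

# Worked example (illustrative): resistor_parallel([2, 4]) == 1 / (1/2 + 1/4) == 4/3
# (about 1.333 ohms), while resistor_series([2, 4]) == 6 ohms.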
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 54 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
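

# Usage sketch (downloads the checkpoint; illustrative, not part of the module):
#     tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     tok("Kot siedzi na macie.")  # ids come back wrapped with the cls/sep tokens per the methods above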
| 359 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        '''simple docstring'''
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        '''simple docstring'''
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        '''simple docstring'''
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        '''simple docstring'''
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54 | 0 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector):
    '''simple docstring'''
    return np.dot(vector, vector)
class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
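
# Tiny usage sketch on linearly separable points (illustrative, not part of the file):
#     xs = [np.array([0.0, 1.0]), np.array([0.0, 2.0]), np.array([0.0, -1.0]), np.array([0.0, -2.0])]
#     ys = np.array([1, 1, -1, -1])
#     svc = SVC(kernel="linear")
#     svc.fit(xs, ys)
#     svc.predict(np.array([0.0, 3.0]))  # -> 1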
| 196 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 54 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        '''simple docstring'''
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        '''simple docstring'''
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        '''simple docstring'''
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        '''simple docstring'''
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
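
# Example (illustrative): (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2, so
#     FFT([1, 2], [3, 4]).product  # approximately [3, 10, 8], returned as complex
#                                  # values with ~0 imaginary part after rounding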
| 259 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_0(self):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        '''simple docstring'''
        return 100

    @property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 54 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : int = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_UpperCAmelCase : Dict , ) -> PIL.Image.Image:
'''simple docstring'''
_lowerCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : List[str] = size if size is not None else self.size
_lowerCAmelCase : Optional[Any] = get_size_dict(_lowerCAmelCase , param_name="""size""" , default_to_square=_lowerCAmelCase )
_lowerCAmelCase : Optional[Any] = resample if resample is not None else self.resample
_lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Optional[int] = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" , default_to_square=_lowerCAmelCase )
_lowerCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCAmelCase : Tuple = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCAmelCase : Optional[int] = [convert_to_rgb(_lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCAmelCase : str = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase : int = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase : List[str] = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase : int = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase : Tuple = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
_lowerCAmelCase : List[str] = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase : int = {"""pixel_values""": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
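# Hedged usage sketch (added, not part of the original file; the class and
# checkpoint names below are assumptions). With this processor exposed as
# `CLIPImageProcessor`, preprocessing a single image would look like:
#
#     processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(images=Image.open("cat.png"), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)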
| 429 |
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 54 | 0 |
"""
Dijkstra's two-stack algorithm for evaluating fully parenthesized
arithmetic expressions such as (5 + ((4 * 2) * (2 + 3))).
"""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a right parenthesis pops one operator and two operands,
            # applies the operator, and pushes the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the single remaining operand is the value of the expression
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
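    # Added sanity check (hedged): "(2 + 3)" pushes 2 and 3 (RULE 1), "+" (RULE 2),
    # and the ")" reduces them to 2 + 3 = 5 (RULE 4).
    assert dijkstras_two_stack_algorithm("(2 + 3)") == 5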
| 245 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x = r1 (mod n1) and x = r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b = 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
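    # Added sanity check (hedged): x = 1 (mod 5) and x = 1 (mod 7) has the unique
    # solution x = 1 modulo 35, and both implementations agree on it.
    assert chinese_remainder_theorem(5, 1, 7, 1) == chinese_remainder_theorem2(5, 1, 7, 1) == 1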
| 54 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    # Each term of the Chudnovsky series contributes roughly 14 digits.
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
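    # Added sanity check (hedged): with 10 digits of precision the result should
    # match the 10-character decimal expansion of pi.
    assert pi(10) == "3.14159265"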
| 322 |
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        # Shard filenames encode their sample count as "-<shard>-<count>.tfrecord".
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
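# Hedged invocation sketch (added; the script name and GCS paths below are
# placeholders, not part of the original file):
#
#   python run_mlm.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/model \
#       --tpu_name local --bfloat16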
| 54 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __magic_name__ ( __lowercase ):
UpperCamelCase_ = 42
class __magic_name__ ( __lowercase , __lowercase ):
UpperCamelCase_ = True
@register_to_config
def __init__( self , A_ = 3 , A_ = 3 , A_ = ("DownEncoderBlock2D",) , A_ = ("UpDecoderBlock2D",) , A_ = (64,) , A_ = 1 , A_ = "silu" , A_ = 4 , A_ = 32 , A_ = 32 , A_ = 0.1_82_15 , ) -> Tuple:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
_lowercase: Optional[Any] = Encoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , down_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , act_fn=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , double_z=_lowerCAmelCase , )
# pass init params to Decoder
_lowercase: List[Any] = Decoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , up_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , act_fn=_lowerCAmelCase , )
_lowercase: Tuple = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_lowercase: Optional[Any] = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , 1 )
_lowercase: str = False
_lowercase: List[str] = False
# only relevant if vae tiling is enabled
_lowercase: Optional[int] = self.config.sample_size
_lowercase: List[str] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_lowercase: Tuple = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_lowercase: Optional[int] = 0.25
def lowercase_ ( self , A_ , A_=False ) -> Tuple:
"""simple docstring"""
if isinstance(_lowerCAmelCase , (Encoder, Decoder) ):
_lowercase: Tuple = value
def lowercase_ ( self , A_ = True ) -> Optional[Any]:
"""simple docstring"""
_lowercase: List[Any] = use_tiling
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
self.enable_tiling(_lowerCAmelCase )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: Any = True
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase: Any = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
_lowercase: Tuple = {}
def fn_recursive_add_processors(A_ , A_ , A_ ):
if hasattr(_lowerCAmelCase , '''set_processor''' ):
_lowercase: Dict = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , _lowerCAmelCase , _lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return processors
def lowercase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowercase: Optional[Any] = len(self.attn_processors.keys() )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(A_ , A_ , A_ ):
if hasattr(_lowerCAmelCase , '''set_processor''' ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
module.set_processor(_lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , _lowerCAmelCase , _lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowercase_ ( self ) -> int:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowercase_ ( self , A_ , A_ = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
if self.use_slicing and x.shape[0] > 1:
_lowercase: str = [self.encoder(_lowerCAmelCase ) for x_slice in x.split(1 )]
_lowercase: str = torch.cat(_lowerCAmelCase )
else:
_lowercase: Dict = self.encoder(_lowerCAmelCase )
_lowercase: List[str] = self.quant_conv(_lowerCAmelCase )
_lowercase: Optional[int] = DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def lowercase_ ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
_lowercase: Dict = self.post_quant_conv(_lowerCAmelCase )
_lowercase: Any = self.decoder(_lowerCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
@apply_forward_hook
def lowercase_ ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
_lowercase: Union[str, Any] = [self._decode(_lowerCAmelCase ).sample for z_slice in z.split(1 )]
_lowercase: Tuple = torch.cat(_lowerCAmelCase )
else:
_lowercase: Tuple = self._decode(_lowerCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_lowerCAmelCase )
def lowercase_ ( self , A_ , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
_lowercase: Tuple = min(a.shape[2] , b.shape[2] , _lowerCAmelCase )
for y in range(_lowerCAmelCase ):
_lowercase: Optional[int] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowercase_ ( self , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowercase: str = min(a.shape[3] , b.shape[3] , _lowerCAmelCase )
for x in range(_lowerCAmelCase ):
_lowercase: List[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowercase_ ( self , A_ , A_ = True ) -> AutoencoderKLOutput:
"""simple docstring"""
_lowercase: Optional[Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_lowercase: Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
_lowercase: List[Any] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_lowercase: List[str] = []
for i in range(0 , x.shape[2] , _lowerCAmelCase ):
_lowercase: Any = []
for j in range(0 , x.shape[3] , _lowerCAmelCase ):
_lowercase: Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_lowercase: int = self.encoder(_lowerCAmelCase )
_lowercase: Union[str, Any] = self.quant_conv(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
_lowercase: List[Any] = []
for i, row in enumerate(_lowerCAmelCase ):
_lowercase: List[str] = []
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowercase: List[str] = self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
_lowercase: Optional[int] = self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
_lowercase: Tuple = torch.cat(_lowerCAmelCase , dim=2 )
_lowercase: int = DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def lowercase_ ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
_lowercase: Dict = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_lowercase: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
_lowercase: Optional[Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_lowercase: int = []
for i in range(0 , z.shape[2] , _lowerCAmelCase ):
_lowercase: Optional[Any] = []
for j in range(0 , z.shape[3] , _lowerCAmelCase ):
_lowercase: Optional[Any] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_lowercase: Optional[int] = self.post_quant_conv(_lowerCAmelCase )
_lowercase: int = self.decoder(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
_lowercase: Tuple = []
for i, row in enumerate(_lowerCAmelCase ):
_lowercase: Union[str, Any] = []
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowercase: str = self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
_lowercase: List[str] = self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
_lowercase: Dict = torch.cat(_lowerCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
def lowercase_ ( self , A_ , A_ = False , A_ = True , A_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
_lowercase: Union[str, Any] = sample
_lowercase: str = self.encode(_lowerCAmelCase ).latent_dist
if sample_posterior:
_lowercase: Dict = posterior.sample(generator=_lowerCAmelCase )
else:
_lowercase: Optional[int] = posterior.mode()
_lowercase: Any = self.decode(_lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
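# Hedged usage sketch (added; the model id and the scaling convention are
# assumptions drawn from common diffusers usage, not part of the original file):
#
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#     vae.enable_tiling()  # bounded-memory encode/decode for large images
#     posterior = vae.encode(images).latent_dist          # images: (B, 3, H, W)
#     latents = posterior.sample() * vae.config.scaling_factor
#     reconstruction = vae.decode(latents / vae.config.scaling_factor).sample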
| 353 |
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
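# Hedged usage sketch (added): the checkpoint below is the same one exercised by
# the slow test above.
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#         question="How many cats are there?", top_k=2)
#     # -> [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]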
| 54 | 0 |
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
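# Hedged usage sketch (added): `FileLock` resolves to the platform-appropriate
# backend selected above.
#
#     lock = FileLock("resource.txt.lock", timeout=5)
#     with lock:
#         ...  # critical section; the lock is released on __exit__
#     # Timeout is raised if the lock cannot be acquired within 5 seconds.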
| 68 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: maximise profit under a total weight limit."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
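    # Added worked example (hedged): profits [10, 20, 30], weights [1, 2, 3] and a
    # 5 kg limit take items 1 and 2 fully plus 2/3 of item 3: 10 + 20 + 20 = 50.
    assert calc_profit([10, 20, 30], [1, 2, 3], 5) == 50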
| 54 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in the square enclosing the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # The quarter circle of radius 2 has area pi, so integrating
        # sqrt(4 - x^2) from 0 to 2 estimates pi directly.
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
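    # Added demo (hedged): with 100_000 samples the estimates typically land
    # within about 1e-2 of the true values.
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)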
| 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowercase :
def __init__( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=1_00 , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : str=30 , __lowerCamelCase : str=2 , __lowerCamelCase : str=3 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[str]=32 , __lowerCamelCase : str=4 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : str=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=[0, 1, 2, 3] , ) -> Optional[int]:
'''simple docstring'''
lowercase = parent
lowercase = 1_00
lowercase = batch_size
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = scope
lowercase = out_indices
lowercase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase = (image_size // patch_size) ** 2
lowercase = num_patches + 1
def __a ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = BeitModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ) -> Dict:
'''simple docstring'''
lowercase = BeitForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.type_sequence_label_size
lowercase = BeitForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase = 1
lowercase = BeitForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : int ) -> str:
'''simple docstring'''
lowercase = self.num_labels
lowercase = BeitForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase ,lowercase ,lowercase ,lowercase = config_and_inputs
lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( __lowercase , __lowercase , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def __a ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = BeitModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def __a ( self : List[Any] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __a ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __a ( self : List[Any] ) -> Any:
'''simple docstring'''
pass
def __a ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowercase ,lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 604 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
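# Tokenizes the train/val splits once, records each example's source (or
# max(source, target)) token length, and pickles the lists so samplers can
# build length-balanced batches without re-tokenizing on every run.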
def save_len_file(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    """Compute and pickle the token length of every train/val example."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54 | 0 |
"""simple docstring"""
from collections import defaultdict
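# "Even Tree" (HackerRank): remove as many edges as possible from a tree with an
# even number of vertices so that every remaining component is even-sized. An
# edge can be cut exactly when the subtree hanging below it has even size, so a
# single DFS that returns subtree sizes finds all cuttable edges.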
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``, recording even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root; ``cuts`` then holds every even-sized subtree root."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
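    # For the sample tree above this prints 2: the subtrees rooted at 3 and 6 are
    # even-sized; the root itself also lands in `cuts`, hence the -1 below.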
print(len(cuts) - 1)
| 624 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 54 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
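# These tests exercise utils/check_copies.py: setUp copies modeling_bert.py into a
# temporary "transformers" tree so the copy-consistency checks can run against a
# file the tests are free to rewrite and overwrite.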
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 359 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search a sorted list by halving the candidate slice."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
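# A minimal iterative sketch (added for illustration, not part of the original
# module): each recursive call above slices the list, which copies O(n) items,
# while index arithmetic below keeps the whole search in place with O(log n)
# comparisons.
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1  # discard the upper half
        else:
            low = midpoint + 1  # discard the lower half
    return False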
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(f"""{target} was {not_str}found in {sequence}""")
| 54 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
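# The fast tests below run the full DDIM sampling loop on a tiny, randomly
# initialized UNet (2 inference steps, 32x32 images) so they can execute on CPU;
# the @slow classes at the bottom validate real checkpoints on GPU.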
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # assumption: the original name of this boolean flag was not recoverable
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 196 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
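# Each table above pairs hands with the value a PokerHand predicate or comparison
# is expected to produce; the parametrized tests below simply iterate the tables.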
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # A five-high straight (low ace) must sort below a six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls must keep returning True without mutating the card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Project Euler problem 54: count player-one wins over poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 54 | 0 |
"""simple docstring"""
import math
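# Power triangle: with apparent power S and power factor pf = cos(phi),
# real power is P = S * pf and reactive power is Q = S * sin(phi)
# = S * sqrt(1 - pf**2), using sin^2(phi) + cos^2(phi) = 1.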
def real_power(apparent_power: float, power_factor: float) -> float:
    """Return real power P = S * pf from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return reactive power Q = S * sqrt(1 - pf**2) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
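# ConvNeXt-style evaluation preprocessing: below 384 px the shortest edge is first
# resized to shortest_edge / crop_pct and then center-cropped back to
# shortest_edge; at 384 px and above the image is warped directly with no crop.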
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        # parenthesized to avoid the and/or precedence bug in the original check
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 54 | 0 |
from manim import *
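# The scene below animates big-model checkpoint loading: memory cells for CPU,
# GPU, model and disk are drawn as rectangles, checkpoint shards flow onto the
# disk column, and the shards are then faded out to depict garbage collection.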
class BigModelLoadingScene(Scene):  # NOTE: placeholder class name (assumed); Manim scenes implement `construct`
    def construct(self):
_lowerCAmelCase : str = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : Optional[int] = Rectangle(height=0.25 , width=0.25 )
_lowerCAmelCase : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Dict = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : List[Any] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : List[Any] = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : Dict = Text("""CPU""" , font_size=24 )
_lowerCAmelCase : int = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
_lowerCAmelCase : Tuple = [mem.copy() for i in range(4 )]
_lowerCAmelCase : Optional[Any] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : Any = Text("""GPU""" , font_size=24 )
_lowerCAmelCase : Tuple = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
_lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Tuple = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : Any = Text("""Model""" , font_size=24 )
_lowerCAmelCase : Any = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[str] = []
for i, rect in enumerate(_lowerCAmelCase ):
rect.set_stroke(_lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowerCAmelCase , buff=0.0 )
self.add(_lowerCAmelCase )
model_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase , *_lowerCAmelCase )
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : Optional[Any] = Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase : Dict = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(_lowerCAmelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[str] = []
for i, rect in enumerate(_lowerCAmelCase ):
_lowerCAmelCase : Tuple = fill.copy().set_fill(_lowerCAmelCase , opacity=0.7 )
target.move_to(_lowerCAmelCase )
ckpt_arr.append(_lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase )
_lowerCAmelCase : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : Dict = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase : Any = MarkupText(
f"<span fgcolor=\'{BLUE}\'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCAmelCase )
_lowerCAmelCase : str = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_lowerCAmelCase : Tuple = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : Union[str, Any] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : Optional[int] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : List[Any] = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
_lowerCAmelCase : int = Text("""Disk""" , font_size=24 )
_lowerCAmelCase : Any = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) , Write(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) )
_lowerCAmelCase : Union[str, Any] = []
for i, rect in enumerate(_lowerCAmelCase ):
_lowerCAmelCase : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_lowerCAmelCase , run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(FadeOut(_lowerCAmelCase ) )
_lowerCAmelCase : Optional[Any] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) )
self.play(
FadeOut(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , *_lowerCAmelCase ) , )
self.wait()
| 429 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
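# Builds a Slack report for the doctest CI job: pass/fail counts are parsed out of
# the pytest terminal summary and rendered as Slack Block Kit sections with a
# link back to the originating GitHub Actions run.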
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
UpperCAmelCase_ =None
UpperCAmelCase_ =False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , lowercase__ ):
UpperCAmelCase_ =True
UpperCAmelCase_ =line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
UpperCAmelCase_ =line
UpperCAmelCase_ =False
return failures
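# Illustrative sketch (hypothetical input): given a failures_short report such as
#   "_ [doctest] transformers.models.bert ...\n<first error line>",
# the parser maps the file token (third field of the header line) to the first
# following line that does not start with a digit, i.e.
#   {"transformers.models.bert": "<first error line>"}.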
class A :
def __init__( self: Optional[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =title
UpperCAmelCase_ =doc_test_results["time_spent"].split("," )[0]
UpperCAmelCase_ =doc_test_results["success"]
UpperCAmelCase_ =doc_test_results["failures"]
UpperCAmelCase_ =self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCAmelCase_ =doc_test_results
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self._time_spent]
UpperCAmelCase_ =0
for time in time_spent:
UpperCAmelCase_ =time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCAmelCase ) == 1:
UpperCAmelCase_ =[0, 0, time_parts[0]]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'{int(_lowerCAmelCase )}h{int(_lowerCAmelCase )}m{int(_lowerCAmelCase )}s'
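    # Sketch (hedged): if self._time_spent is "1:02:03", the loop converts it to
    # 3723 total seconds and the property renders it back as "1h2m3s".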
@property
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =40
UpperCAmelCase_ ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
UpperCAmelCase_ =""
for category, failures in category_failures.items():
if len(_lowerCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCAmelCase )
@staticmethod
def lowerCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =[
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
UpperCAmelCase_ =F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
UpperCAmelCase_ =client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =""
for key, value in failures.items():
UpperCAmelCase_ =value[:200] + " [Truncated]" if len(_lowerCAmelCase ) > 250 else value
failures_text += F'*{key}*\n_{value}_\n\n'
UpperCAmelCase_ =job_name
UpperCAmelCase_ ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
UpperCAmelCase_ ={
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
UpperCAmelCase_ =self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
        UpperCAmelCase_ =sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
UpperCAmelCase_ =F'*Num failures* :{len(job_result["failed"] )} \n'
UpperCAmelCase_ =job_result["failures"]
UpperCAmelCase_ =self.get_reply_blocks(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text=_lowerCAmelCase )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=_lowerCAmelCase , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =os.environ["GITHUB_RUN_ID"]
UpperCAmelCase_ =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
UpperCAmelCase_ =requests.get(lowercase__ ).json()
UpperCAmelCase_ ={}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
UpperCAmelCase_ =math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(lowercase__ ):
UpperCAmelCase_ =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase__ )
return {}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
if os.path.exists(lowercase__ ):
UpperCAmelCase_ =os.listdir(lowercase__ )
for file in files:
try:
with open(os.path.join(lowercase__ , lowercase__ ) , encoding="utf-8" ) as f:
UpperCAmelCase_ =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase__ , lowercase__ )}.' ) from e
return _artifact
def a__ ( ):
'''simple docstring'''
class A :
def __init__( self: Tuple , _lowerCAmelCase: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =name
UpperCAmelCase_ =[]
def __str__( self: Optional[int] ) -> Tuple:
'''simple docstring'''
return self.name
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: str ) -> List[Any]:
'''simple docstring'''
self.paths.append({"name": self.name, "path": path} )
UpperCAmelCase_ ={}
UpperCAmelCase_ =filter(os.path.isdir , os.listdir() )
for directory in directories:
UpperCAmelCase_ =directory
if artifact_name not in _available_artifacts:
UpperCAmelCase_ =Artifact(lowercase__ )
_available_artifacts[artifact_name].add_path(lowercase__ )
return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 54 | 0 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = PriorTransformer
_SCREAMING_SNAKE_CASE : Dict = 'hidden_states'
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = 4
_lowercase : Tuple = 8
_lowercase : List[str] = 7
_lowercase : Any = floats_tensor((batch_size, embedding_dim) ).to(_lowerCAmelCase )
_lowercase : int = floats_tensor((batch_size, embedding_dim) ).to(_lowerCAmelCase )
_lowercase : Optional[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self , _UpperCamelCase=0 ):
"""simple docstring"""
torch.manual_seed(_lowerCAmelCase )
_lowercase : Any = 4
_lowercase : Union[str, Any] = 8
_lowercase : Any = 7
_lowercase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
_lowercase : Dict = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
_lowercase : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (4, 8)
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (4, 8)
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Any = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
_lowercase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : Tuple = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_lowerCAmelCase )
_lowercase : Dict = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
_lowercase : Any = self.model_class(**_lowerCAmelCase )
_lowercase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : int = [*signature.parameters.keys()]
_lowercase : List[str] = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , _lowerCAmelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
_lowercase : Union[str, Any] = model.to(_lowerCAmelCase )
if hasattr(_lowerCAmelCase , "set_default_attn_processor" ):
model.set_default_attn_processor()
_lowercase : Dict = self.get_dummy_seed_input()
with torch.no_grad():
_lowercase : str = model(**_lowerCAmelCase )[0]
_lowercase : List[str] = output[0, :5].flatten().cpu()
print(_lowerCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
_lowercase : int = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self , _UpperCamelCase=1 , _UpperCamelCase=768 , _UpperCamelCase=77 , _UpperCamelCase=0 ):
"""simple docstring"""
torch.manual_seed(_lowerCAmelCase )
_lowercase : Optional[int] = batch_size
_lowercase : Optional[Any] = embedding_dim
_lowercase : Dict = num_embeddings
_lowercase : List[str] = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
_lowercase : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
_lowercase : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_dummy_seed_input(seed=_lowerCAmelCase )
with torch.no_grad():
_lowercase : Tuple = model(**_lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
_lowercase : int = sample[0, :8].flatten().cpu()
print(_lowerCAmelCase )
_lowercase : Optional[int] = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
| 245 |
def a__ ( lowercase__ = 2_0_0 ):
'''simple docstring'''
UpperCAmelCase_ =[1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
UpperCAmelCase_ =[0] * (pence + 1)
UpperCAmelCase_ =1 # base case: 1 way to make 0 pence
for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
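# Worked example (sketch): with pence = 5 and coins [1, 2, 5], the table ends up as
# number_of_ways = [1, 1, 2, 2, 3, 4] -- four ways to make 5 pence: 5, 2+2+1,
# 2+1+1+1 and 1+1+1+1+1.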
if __name__ == "__main__":
assert solution(200) == 7_3682
| 54 | 0 |
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : str ):
"""simple docstring"""
_lowercase : str = None
_lowercase : List[Any] = None
_lowercase : Union[str, Any] = graph
self._normalize_graph(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = len(_lowerCAmelCase )
_lowercase : Tuple = None
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase : Any , UpperCamelCase : Dict ):
"""simple docstring"""
        if isinstance(sources , int ):
_lowercase : Dict = [sources]
        if isinstance(sinks , int ):
_lowercase : Union[str, Any] = [sinks]
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) == 0:
return
_lowercase : List[Any] = sources[0]
_lowercase : int = sinks[0]
        # make a fake vertex if there is more
        # than one source or sink
if len(_lowerCAmelCase ) > 1 or len(_lowerCAmelCase ) > 1:
_lowercase : Optional[int] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_lowercase : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_lowercase : Dict = max_input_flow
_lowercase : Optional[Any] = 0
_lowercase : Union[str, Any] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_lowercase : Optional[int] = max_input_flow
_lowercase : Any = size - 1
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : int , UpperCamelCase : Tuple ):
"""simple docstring"""
_lowercase : str = algorithm(self )
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
_lowercase : Any = flow_network
_lowercase : List[str] = flow_network.verticesCount
_lowercase : Optional[Any] = flow_network.sourceIndex
_lowercase : Dict = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
_lowercase : List[Any] = flow_network.graph
_lowercase : Union[str, Any] = False
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
if not self.executed:
self._algorithm()
_lowercase : Any = True
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
class UpperCAmelCase__ ( __lowercase ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : Optional[int] ):
"""simple docstring"""
super().__init__(_lowerCAmelCase )
# use this to save your result
_lowercase : Optional[Any] = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class UpperCAmelCase__ ( __lowercase ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase : List[Any] ):
"""simple docstring"""
super().__init__(_lowerCAmelCase )
_lowercase : Dict = [[0] * self.verticies_count for i in range(self.verticies_count )]
_lowercase : str = [0] * self.verticies_count
_lowercase : Optional[Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : Dict = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_lowercase : List[Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_lowercase : List[Any] = 0
while i < len(_lowerCAmelCase ):
_lowercase : Tuple = vertices_list[i]
_lowercase : Union[str, Any] = self.heights[vertex_index]
self.process_vertex(_lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_lowerCAmelCase ) )
_lowercase : List[str] = 0
else:
i += 1
_lowercase : Union[str, Any] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase : Optional[Any] ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_lowerCAmelCase , _lowerCAmelCase )
self.relabel(_lowerCAmelCase )
def lowerCAmelCase_ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
"""simple docstring"""
_lowercase : Dict = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : str , UpperCamelCase : Any ):
"""simple docstring"""
_lowercase : Any = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_lowercase : Any = self.heights[to_index]
if min_height is not None:
_lowercase : int = min_height + 1
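# Note (my summary, not from the source): push() moves min(excess, residual
# capacity) along an admissible edge, relabel() lifts a vertex to one above its
# lowest admissible neighbour, and the relabel-to-front loop above moves a vertex
# back to the front of the list whenever its height grows, re-examining it first.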
if __name__ == "__main__":
UpperCamelCase__ = [0]
UpperCamelCase__ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCamelCase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCamelCase__ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCamelCase__ = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 322 |
import sys
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =[[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
UpperCAmelCase_ =[[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
for chain_length in range(2 , lowercase__ ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ =a + chain_length - 1
UpperCAmelCase_ =sys.maxsize
for c in range(lowercase__ , lowercase__ ):
UpperCAmelCase_ =(
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ =cost
UpperCAmelCase_ =c
return matrix, sol
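# Worked example (sketch): for the classic dimension array [30, 35, 15, 5, 10, 20, 25]
# used in main() below, the minimum cost matrix[1][n - 1] is 15125 scalar
# multiplications, with optimal parenthesization ((A1(A2A3))((A4A5)A6)).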
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if i == j:
print("A" + str(lowercase__ ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(lowercase__ , lowercase__ , optimal_solution[i][j] )
print_optiomal_solution(lowercase__ , optimal_solution[i][j] + 1 , lowercase__ )
print(")" , end=" " )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
UpperCAmelCase_ =len(lowercase__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ =matrix_chain_order(lowercase__ )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(lowercase__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 54 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_lowercase: List[str] = FileLock(str(tmpdir / '''foo.lock''' ) )
_lowercase: Tuple = FileLock(str(tmpdir / '''foo.lock''' ) )
_lowercase: int = 0.01
with locka.acquire():
with pytest.raises(lowercase__ ):
_lowercase: Dict = time.time()
locka.acquire(lowercase__ )
assert time.time() - _start > timeout
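# Sketch of the expected behaviour (my reading): the second FileLock targets the
# same "foo.lock" path, so acquiring it with timeout=0.01 while the first lock is
# held blocks for at least the timeout and then raises the expected Timeout.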
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_lowercase: Union[str, Any] = '''a''' * 1_000 + '''.lock'''
_lowercase: str = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(lowercase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
_lowercase: str = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase__ ):
locka.acquire(0 )
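# Sketch of what this test exercises (my reading): os.path.basename(lock._lock_file)
# is capped at 255 characters, so even a 1000-character stem still yields a valid
# ".lock" file name on common filesystems, and the lock remains functional.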
| 353 |
from math import loga
def a__ ( lowercase__ ):
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(lowercase__ , lowercase__ ):
raise TypeError("Input value must be a 'int' type" )
return 0 if (a == 0) else int(loga(a & -a ) )
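# Worked example (sketch): for 36 == 0b100100, (36 & -36) isolates the lowest set
# bit as 4 == 0b100, and int(log2(4)) == 2 is that bit's zero-based index.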
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = 42
class _A ( __lowercase , __lowercase ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int = 65536 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : str = "fourier" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __SCREAMING_SNAKE_CASE : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __SCREAMING_SNAKE_CASE : Tuple[str] = "UNetMidBlock1D" , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : Tuple[int] = (32, 32, 64) , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = False , ) -> int:
super().__init__()
__UpperCAmelCase =sample_size
# time
if time_embedding_type == "fourier":
__UpperCAmelCase =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_lowerCAmelCase , log=_lowerCAmelCase , flip_sin_to_cos=_lowerCAmelCase )
__UpperCAmelCase =2 * block_out_channels[0]
elif time_embedding_type == "positional":
__UpperCAmelCase =Timesteps(
block_out_channels[0] , flip_sin_to_cos=_lowerCAmelCase , downscale_freq_shift=_lowerCAmelCase )
__UpperCAmelCase =block_out_channels[0]
if use_timestep_embedding:
__UpperCAmelCase =block_out_channels[0] * 4
__UpperCAmelCase =TimestepEmbedding(
in_channels=_lowerCAmelCase , time_embed_dim=_lowerCAmelCase , act_fn=_lowerCAmelCase , out_dim=block_out_channels[0] , )
__UpperCAmelCase =nn.ModuleList([] )
__UpperCAmelCase =None
__UpperCAmelCase =nn.ModuleList([] )
__UpperCAmelCase =None
# down
__UpperCAmelCase =in_channels
for i, down_block_type in enumerate(_lowerCAmelCase ):
__UpperCAmelCase =output_channel
__UpperCAmelCase =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__UpperCAmelCase =i == len(_lowerCAmelCase ) - 1
__UpperCAmelCase =get_down_block(
_lowerCAmelCase , num_layers=_lowerCAmelCase , in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_lowerCAmelCase )
# mid
__UpperCAmelCase =get_mid_block(
_lowerCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_lowerCAmelCase , add_downsample=_lowerCAmelCase , )
# up
__UpperCAmelCase =list(reversed(_lowerCAmelCase ) )
__UpperCAmelCase =reversed_block_out_channels[0]
if out_block_type is None:
__UpperCAmelCase =out_channels
else:
__UpperCAmelCase =block_out_channels[0]
for i, up_block_type in enumerate(_lowerCAmelCase ):
__UpperCAmelCase =output_channel
__UpperCAmelCase =(
reversed_block_out_channels[i + 1] if i < len(_lowerCAmelCase ) - 1 else final_upsample_channels
)
__UpperCAmelCase =i == len(_lowerCAmelCase ) - 1
__UpperCAmelCase =get_up_block(
_lowerCAmelCase , num_layers=_lowerCAmelCase , in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_lowerCAmelCase )
__UpperCAmelCase =output_channel
# out
__UpperCAmelCase =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
__UpperCAmelCase =get_out_block(
out_block_type=_lowerCAmelCase , num_groups_out=_lowerCAmelCase , embed_dim=block_out_channels[0] , out_channels=_lowerCAmelCase , act_fn=_lowerCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, float, int] , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[UNetaDOutput, Tuple]:
__UpperCAmelCase =timestep
if not torch.is_tensor(_lowerCAmelCase ):
__UpperCAmelCase =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_lowerCAmelCase ) and len(timesteps.shape ) == 0:
__UpperCAmelCase =timesteps[None].to(sample.device )
__UpperCAmelCase =self.time_proj(_lowerCAmelCase )
if self.config.use_timestep_embedding:
__UpperCAmelCase =self.time_mlp(_lowerCAmelCase )
else:
__UpperCAmelCase =timestep_embed[..., None]
__UpperCAmelCase =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__UpperCAmelCase =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__UpperCAmelCase =()
for downsample_block in self.down_blocks:
__UpperCAmelCase , __UpperCAmelCase =downsample_block(hidden_states=_lowerCAmelCase , temb=_lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__UpperCAmelCase =self.mid_block(_lowerCAmelCase , _lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__UpperCAmelCase =down_block_res_samples[-1:]
__UpperCAmelCase =down_block_res_samples[:-1]
__UpperCAmelCase =upsample_block(_lowerCAmelCase , res_hidden_states_tuple=_lowerCAmelCase , temb=_lowerCAmelCase )
# 5. post-process
if self.out_block:
__UpperCAmelCase =self.out_block(_lowerCAmelCase , _lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_lowerCAmelCase )
| 68 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase_ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase__ )
UpperCAmelCase_ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase_ =sd.pop(lowercase__ )
UpperCAmelCase_ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase_ =sd[key]
            # We split QKV into separate Q, K and V projections
UpperCAmelCase_ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase_ =value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores its QKV weight separated as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =torch.split(lowercase__ , depth // 3 , dim=0 )
UpperCAmelCase_ =q
UpperCAmelCase_ =k
UpperCAmelCase_ =v
del sd[key]
return sd
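# Sketch (shape assumption): for hidden size h, a fused ".qkv_proj." weight has
# shape (3*h, h); torch.split(value, h, dim=0) above recovers the three (h, h)
# projection matrices that are written back under .q_proj/.k_proj/.v_proj.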
@torch.no_grad()
def a__ ( lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =load_checkpoint(lowercase__ )
if config is not None:
UpperCAmelCase_ =OPTConfig.from_pretrained(lowercase__ )
else:
UpperCAmelCase_ =OPTConfig()
UpperCAmelCase_ =OPTModel(lowercase__ ).half().eval()
model.load_state_dict(lowercase__ )
# Check results
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__lowercase : str =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 54 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Optional[Any] = HfArgumentParser(lowercase__ )
__magic_name__ :Optional[int] = parser.parse_args_into_dataclasses()[0]
__magic_name__ :Any = TensorFlowBenchmark(args=lowercase__ )
try:
__magic_name__ :Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__magic_name__ :Union[str, Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
__magic_name__ :Optional[int] = ''' '''.join(str(lowercase__ ).split(''' ''' )[:-1] )
__magic_name__ :Any = ''''''
__magic_name__ :int = eval(str(lowercase__ ).split(''' ''' )[-1] )
__magic_name__ :Union[str, Any] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase__ )
if len(lowercase__ ) > 0:
__magic_name__ :List[Any] = full_error_msg + begin_error_msg + str(lowercase__ )
raise ValueError(lowercase__ )
benchmark.run()
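# Sketch (hypothetical flag): a stale option such as "--no_cuda" would be reported
# via arg_error_msg with the hint to use "--no-cuda" instead, while flags that were
# never TensorFlowBenchmark arguments are collected in wrong_args and re-raised.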
if __name__ == "__main__":
main()
| 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
__lowercase : str ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowercase : Any ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =(images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ =numpy_to_pil(lowercase__ )
return images
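# Sketch (assuming the usual diffusers [-1, 1] output convention): images / 2 + 0.5
# maps outputs to [0, 1] before clamping, and the permute turns (B, C, H, W) float
# tensors into (B, H, W, C) numpy arrays that PIL can consume.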
def a__ ( lowercase__ ):
'''simple docstring'''
if images.ndim == 3:
UpperCAmelCase_ =images[None, ...]
UpperCAmelCase_ =(images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase_ =[Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
UpperCAmelCase_ =[Image.fromarray(lowercase__ ) for image in images]
return pil_images
| 54 | 0 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
A_ = logging.get_logger(__name__)
# General docstring
A_ = """PoolFormerConfig"""
# Base docstring
A_ = """sail/poolformer_s12"""
A_ = [1, 512, 7, 7]
# Image classification docstring
A_ = """sail/poolformer_s12"""
A_ = """tabby, tabby cat"""
A_ = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase = 0.0, UpperCAmelCase = False )-> Optional[Any]:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
lowercase = 1 - drop_prob
lowercase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
lowercase = keep_prob + torch.rand(lowercase__, dtype=input.dtype, device=input.device )
random_tensor.floor_() # binarize
lowercase = input.div(lowercase__ ) * random_tensor
return output
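# Sketch: with drop_prob = 0.2 and a (B, C, H, W) input, random_tensor has shape
# (B, 1, 1, 1); entire samples are zeroed with probability 0.2 and the survivors
# are rescaled by 1 / keep_prob so the expected activation stays unchanged.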
class __lowercase ( nn.Module ):
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[float] = None ) -> None:
'''simple docstring'''
super().__init__()
lowercase = drop_prob
def __a ( self : Any , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return drop_path(_lowerCAmelCase , self.drop_prob , self.training )
def __a ( self : Tuple ) -> str:
'''simple docstring'''
return "p={}".format(self.drop_prob )
class __lowercase ( nn.Module ):
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase = patch_size if isinstance(_lowerCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
lowercase = stride if isinstance(_lowerCAmelCase , collections.abc.Iterable ) else (stride, stride)
lowercase = padding if isinstance(_lowerCAmelCase , collections.abc.Iterable ) else (padding, padding)
lowercase = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , kernel_size=_lowerCAmelCase , stride=_lowerCAmelCase , padding=_lowerCAmelCase )
lowercase = norm_layer(_lowerCAmelCase ) if norm_layer else nn.Identity()
def __a ( self : Any , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
lowercase = self.projection(_lowerCAmelCase )
lowercase = self.norm(_lowerCAmelCase )
return embeddings
class __lowercase ( nn.GroupNorm ):
def __init__( self : int , __lowerCamelCase : int , **__lowerCamelCase : Any ) -> Tuple:
'''simple docstring'''
super().__init__(1 , _lowerCAmelCase , **_lowerCAmelCase )
class __lowercase ( nn.Module ):
def __init__( self : Tuple , __lowerCamelCase : int ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowercase = nn.AvgPoolad(_lowerCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=_lowerCAmelCase )
def __a ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.pool(_lowerCAmelCase ) - hidden_states
class __lowercase ( nn.Module ):
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ) -> Any:
'''simple docstring'''
super().__init__()
lowercase = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , 1 )
lowercase = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , 1 )
lowercase = PoolFormerDropPath(_lowerCAmelCase )
if isinstance(config.hidden_act , _lowerCAmelCase ):
lowercase = ACTaFN[config.hidden_act]
else:
lowercase = config.hidden_act
def __a ( self : str , __lowerCamelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase = self.conva(_lowerCAmelCase )
lowercase = self.act_fn(_lowerCAmelCase )
lowercase = self.drop(_lowerCAmelCase )
lowercase = self.conva(_lowerCAmelCase )
lowercase = self.drop(_lowerCAmelCase )
return hidden_states
class __lowercase ( nn.Module ):
def __init__( self : str , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowercase = PoolFormerPooling(_lowerCAmelCase )
lowercase = PoolFormerOutput(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase = PoolFormerGroupNorm(_lowerCAmelCase )
lowercase = PoolFormerGroupNorm(_lowerCAmelCase )
        # Stochastic depth (drop path): randomly skips this block's residual branch during training
lowercase = PoolFormerDropPath(_lowerCAmelCase ) if drop_path > 0.0 else nn.Identity()
lowercase = config.use_layer_scale
if config.use_layer_scale:
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_lowerCAmelCase) ) , requires_grad=_lowerCAmelCase )
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_lowerCAmelCase) ) , requires_grad=_lowerCAmelCase )
def __a ( self : Tuple , __lowerCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
if self.use_layer_scale:
lowercase = self.pooling(self.before_norm(_lowerCAmelCase ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowercase = hidden_states + self.drop_path(_lowerCAmelCase )
lowercase = ()
lowercase = self.output(self.after_norm(_lowerCAmelCase ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowercase = hidden_states + self.drop_path(_lowerCAmelCase )
lowercase = (output,) + outputs
return outputs
else:
lowercase = self.drop_path(self.pooling(self.before_norm(_lowerCAmelCase ) ) )
# First residual connection
lowercase = pooling_output + hidden_states
lowercase = ()
# Second residual connection inside the PoolFormerOutput block
lowercase = self.drop_path(self.output(self.after_norm(_lowerCAmelCase ) ) )
lowercase = hidden_states + layer_output
lowercase = (output,) + outputs
return outputs
class __lowercase ( nn.Module ):
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict ) -> str:
'''simple docstring'''
super().__init__()
lowercase = config
# stochastic depth decay rule
lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowercase = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowercase = nn.ModuleList(_lowerCAmelCase )
# Transformer blocks
lowercase = []
lowercase = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowercase = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_lowerCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_lowerCAmelCase ) )
lowercase = nn.ModuleList(_lowerCAmelCase )
def __a ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Dict=True ) -> str:
'''simple docstring'''
lowercase = () if output_hidden_states else None
lowercase = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowercase ,lowercase = layers
# Get patch embeddings from hidden_states
lowercase = embedding_layer(_lowerCAmelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(_lowerCAmelCase ):
lowercase = blk(_lowerCAmelCase )
lowercase = layer_outputs[0]
if output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase )
class __lowercase ( __lowercase ):
lowercase = PoolFormerConfig
lowercase = 'poolformer'
lowercase = 'pixel_values'
lowercase = True
def __a ( self : Tuple , __lowerCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowerCAmelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __a ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple=False ) -> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase = value
A_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
A_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , __lowercase , )
class __lowercase ( __lowercase ):
def __init__( self : Any , __lowerCamelCase : List[Any] ) -> str:
'''simple docstring'''
super().__init__(_lowerCAmelCase )
lowercase = config
lowercase = PoolFormerEncoder(_lowerCAmelCase )
# Initialize weights and apply final processing
self.post_init()
def __a ( self : str ) -> int:
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __a ( self : Any , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
'''simple docstring'''
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
lowercase = self.encoder(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , )
lowercase = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class __lowercase ( nn.Module ):
def __init__( self : str , __lowerCamelCase : Dict ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase = nn.Linear(config.hidden_size , config.hidden_size )
def __a ( self : Dict , __lowerCamelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase = self.dense(_lowerCAmelCase )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , __lowercase , )
class __lowercase ( __lowercase ):
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
super().__init__(_lowerCAmelCase )
lowercase = config.num_labels
lowercase = PoolFormerModel(_lowerCAmelCase )
# Final norm
lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowercase = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __a ( self : Dict , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[torch.LongTensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.poolformer(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , )
lowercase = outputs[0]
lowercase = self.classifier(self.norm(_lowerCAmelCase ).mean([-2, -1] ) )
lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase = '''single_label_classification'''
else:
lowercase = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase = MSELoss()
if self.num_labels == 1:
lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase = loss_fct(_lowerCAmelCase , _lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
lowercase = CrossEntropyLoss()
lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase = BCEWithLogitsLoss()
lowercase = loss_fct(_lowerCAmelCase , _lowerCAmelCase )
if not return_dict:
lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_lowerCAmelCase , logits=_lowerCAmelCase , hidden_states=outputs.hidden_states )
| 604 |
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =int(lowercase__ )
if n_element < 1:
UpperCAmelCase_ =ValueError("a should be a positive number" )
raise my_error
UpperCAmelCase_ =[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =(0, 0, 0)
UpperCAmelCase_ =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
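# Worked example (sketch): hamming(7) returns [1, 2, 3, 4, 5, 6, 8]; each step
# appends the smallest unused product among hamming_list[i]*2, hamming_list[j]*3
# and hamming_list[k]*5, advancing i, j, k past values already covered.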
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
__lowercase : Union[str, Any] =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = CLIPTokenizer
SCREAMING_SNAKE_CASE__ :int = CLIPTokenizerFast
SCREAMING_SNAKE_CASE__ :Any = True
SCREAMING_SNAKE_CASE__ :Optional[int] = {}
SCREAMING_SNAKE_CASE__ :List[str] = False
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().setUp()
# fmt: off
_UpperCamelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCamelCase : Dict = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
_UpperCamelCase : List[Any] = {"unk_token": "<unk>"}
_UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowerCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( self : str , **__a : Optional[Any] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__a : Optional[int] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Dict ) -> int:
_UpperCamelCase : Dict = "lower newer"
_UpperCamelCase : int = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : str = "lower newer"
_UpperCamelCase : int = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
_UpperCamelCase : Any = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_UpperCamelCase : List[str] = tokens + [tokenizer.unk_token]
_UpperCamelCase : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
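    # Sketch of the BPE walk (using the toy vocab above): "lower" starts as
    # l o w e r</w>; the ranked merges apply "l o" -> "lo" and "e r</w>" -> "er</w>",
    # leaving ["lo", "w", "er</w>"] -- the exact tokens asserted above.
    # ("lo w</w>" never fires here because the "w" is not word-final.)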
@require_ftfy
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
_UpperCamelCase : Any = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
_UpperCamelCase : int = tokenizer_s.tokenize(_lowerCAmelCase )
_UpperCamelCase : List[Any] = tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_UpperCamelCase : Union[str, Any] = "xa\u0303y" + " " + "x\xe3y"
_UpperCamelCase : Optional[Any] = tokenizer_s.tokenize(_lowerCAmelCase )
_UpperCamelCase : List[str] = tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Test that the tokenization is identical on unicode of space type
_UpperCamelCase : List[str] = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_UpperCamelCase : Dict = tokenizer_s.tokenize(_lowerCAmelCase )
_UpperCamelCase : Optional[Any] = tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Test that the tokenization is identical on unicode of line break type
_UpperCamelCase : int = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_UpperCamelCase : int = tokenizer_s.tokenize(_lowerCAmelCase )
_UpperCamelCase : Union[str, Any] = tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
_UpperCamelCase : str = F'''{text_of_1_token} {text_of_1_token}'''
_UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , )
_UpperCamelCase : Optional[Any] = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
_UpperCamelCase : List[Any] = F''' {text}'''
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , )
_UpperCamelCase : Optional[Any] = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCAmelCase ) + 1, 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
with self.assertRaises(_lowerCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
super().test_tokenization_python_rust_equals()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
pass
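# A minimal standalone sketch, separate from the test class above, of what the
# offset-mapping assertions exercise: a fast tokenizer reports, for each token,
# the (start, end) character span it was produced from. The checkpoint name is an
# illustrative assumption; any fast CLIP checkpoint should behave the same way.
from transformers import CLIPTokenizerFast

clip_tokenizer_fast = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
sample_text = "lower newer"
sample_encoding = clip_tokenizer_fast(
    sample_text, return_offsets_mapping=True, add_special_tokens=False
)
for token_id, (start, end) in zip(
    sample_encoding["input_ids"], sample_encoding["offset_mapping"]
):
    print(token_id, repr(sample_text[start:end]))  # e.g. "newer" starts at offset 6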
| 624 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: List[Any] , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
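# The shim above is the standard deprecation pattern: subclass the replacement,
# warn once at construction, and delegate everything else. A self-contained sketch
# of the same idea with illustrative names (not part of transformers):
import warnings


class NewProcessor:
    """Stands in for the maintained class, e.g. GLPNImageProcessor."""

    def __init__(self, size: int = 384) -> None:
        self.size = size


class OldFeatureExtractor(NewProcessor):
    """Deprecated alias that still works but emits a FutureWarning."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


OldFeatureExtractor(size=512)  # warns, then behaves exactly like NewProcessor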
| 54 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] ={"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase : Any ={
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
__lowerCAmelCase : Union[str, Any] ={
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
__lowerCAmelCase : Dict ="""▁"""
class _A ( __lowercase ):
snake_case__ : Optional[int] = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
lowercase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowercase = len(self.sp_model ) - 1
lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def A__ ( self ):
"""simple docstring"""
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase = self.sp_model.PieceToId(_lowerCAmelCase )
return spm_id if spm_id else self.unk_token_id
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = []
lowercase = """"""
lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = True
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowercase = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
"""simple docstring"""
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
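# A quick illustration of the sequence layouts built by the tokenizer above:
# single sequences become `<s> A </s>` and pairs become `<s> A </s></s> B </s>`
# (the doubled separator follows the BART/CamemBERT convention). The token ids
# below are placeholders standing in for real sentencepiece ids.
cls_id, sep_id = 0, 2
tokens_a = [10, 11, 12]
tokens_b = [20, 21]

single = [cls_id] + tokens_a + [sep_id]
pair = [cls_id] + tokens_a + [sep_id, sep_id] + tokens_b + [sep_id]
print(single)  # [0, 10, 11, 12, 2]
print(pair)    # [0, 10, 11, 12, 2, 2, 20, 21, 2]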
| 359 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A ( __lowercase , unittest.TestCase ):
_snake_case =CanineTokenizer
_snake_case =False
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: List[Any] ) -> CanineTokenizer:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
UpperCAmelCase_ =1024
return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
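# CANINE is tokenizer-free at the subword level: every character maps directly to
# its Unicode code point, which is why the tests above build special tokens with
# chr()/ord(). A self-contained sketch of that encoding (0xE000/0xE001, i.e.
# 57344/57345, are the private-use code points CANINE reserves for [CLS]/[SEP]):
CLS, SEP = 0xE000, 0xE001


def encode(text: str) -> list[int]:
    return [CLS] + [ord(ch) for ch in text] + [SEP]


def decode(ids: list[int]) -> str:
    return "".join(chr(i) for i in ids if i not in (CLS, SEP))


ids = encode("hi!")
print(ids)          # [57344, 104, 105, 33, 57345]
print(decode(ids))  # hi!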
| 54 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 48
SCREAMING_SNAKE_CASE__ = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = [6, 6, 6, 6]
SCREAMING_SNAKE_CASE__ = 60
SCREAMING_SNAKE_CASE__ = [6, 6, 6, 6]
SCREAMING_SNAKE_CASE__ = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1_26
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = 2_55.0
SCREAMING_SNAKE_CASE__ = """"""
return config
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
SCREAMING_SNAKE_CASE__ = """layernorm.weight"""
if name == "norm.bias":
SCREAMING_SNAKE_CASE__ = """layernorm.bias"""
if "conv_first" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""upsample.2""" , """upsample.convolution_1""" )
SCREAMING_SNAKE_CASE__ = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
SCREAMING_SNAKE_CASE__ = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
SCREAMING_SNAKE_CASE__ = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
SCREAMING_SNAKE_CASE__ = """swin2sr.""" + name
return name
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split(""".""" )
SCREAMING_SNAKE_CASE__ = int(key_split[1] )
SCREAMING_SNAKE_CASE__ = int(key_split[4] )
SCREAMING_SNAKE_CASE__ = config.embed_dim
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[:dim]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ = val[-dim:]
pass
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_config(lowercase__ )
SCREAMING_SNAKE_CASE__ = SwinaSRForImageSuperResolution(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ = convert_state_dict(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(lowercase__ , strict=lowercase__ )
if len(lowercase__ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowercase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"""Unexpected key {key} in state_dict""" )
# verify values
SCREAMING_SNAKE_CASE__ = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE__ = 1_26 if """Jpeg""" in checkpoint_url else 2_56
SCREAMING_SNAKE_CASE__ = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
SCREAMING_SNAKE_CASE__ = transforms(lowercase__ ).unsqueeze(0 )
if config.num_channels == 1:
SCREAMING_SNAKE_CASE__ = pixel_values[:, 0, :, :].unsqueeze(1 )
SCREAMING_SNAKE_CASE__ = model(lowercase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 5_12, 5_12] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 10_24, 10_24] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 10_24, 10_24] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 5_12, 5_12] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 10_24, 10_24] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase__ , atol=1e-3 )
print("""Looks ok!""" )
SCREAMING_SNAKE_CASE__ = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
SCREAMING_SNAKE_CASE__ = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase__ )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
A_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
A_ : str = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
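# The qkv handling above is the generic fused-attention conversion trick: a single
# (3 * dim, dim) projection is cut into equal thirds along dimension 0 to recover
# the separate query/key/value weights. A standalone sketch with a toy tensor:
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = torch.split(qkv_weight, qkv_weight.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (dim, dim)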
| 196 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: int ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: List[List[List[str]]] , _lowerCAmelCase: List[List[str]] , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 4 , ) -> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_lowerCAmelCase , hypotheses=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase )
}
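# GLEU, as the docstring above describes, is min(precision, recall) computed over
# pooled 1..4-gram counts. A from-scratch sketch for one hypothesis/reference pair
# (the metric above delegates the real computation to nltk's corpus_gleu):
from collections import Counter


def ngram_counts(tokens, min_len=1, max_len=4):
    return Counter(
        tuple(tokens[i : i + n])
        for n in range(min_len, max_len + 1)
        for i in range(len(tokens) - n + 1)
    )


def sentence_gleu(hypothesis, reference):
    hyp_counts = ngram_counts(hypothesis)
    ref_counts = ngram_counts(reference)
    overlap = sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    return min(precision, recall)


print(sentence_gleu("the cat sat".split(), "the cat sat down".split()))  # 0.6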
| 54 | 0 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be non-negative')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
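# Sanity check for the function above: when the density parameters sum to one the
# curvature term vanishes, E(z=0) = 1, and the function returns H0 unchanged.
import math

assert math.isclose(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=1 - 0.3 - 1e-4,
        redshift=0,
    ),
    68.3,
)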
| 259 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __lowercase , unittest.TestCase ):
_snake_case =KandinskyVaaImgaImgPipeline
_snake_case =['''image_embeds''', '''negative_image_embeds''', '''image''']
_snake_case =[
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_snake_case =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case =False
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ ={
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ =UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.dummy_unet
UpperCAmelCase_ =self.dummy_movq
UpperCAmelCase_ ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCAmelCase_ =DDIMScheduler(**_lowerCAmelCase )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[Any]=0 ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
# create init_image
UpperCAmelCase_ =floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ ="cpu"
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ =np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ ="A red cartoon frog, 4k"
UpperCAmelCase_ =KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
UpperCAmelCase_ =KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ =pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ =pipeline(
image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCAmelCase_ =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
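# The tests above pin randomness through an explicit torch.Generator rather than a
# global seed, so every pipeline call is reproducible in isolation. A minimal
# demonstration of why that works:
import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(2, 2, generator=gen_a), torch.randn(2, 2, generator=gen_b))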
| 54 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
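# Example usage of the two-ended recursive search above: each call inspects both
# ends of the current window, so lookups stay O(n) overall but recurse at most
# len(list_data) // 2 levels deep.
data = [4, 8, 15, 16, 23, 42]
assert search(data, 23) == 4
assert search(data, 4) == 0
assert search(data, 7) == -1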
| 429 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: Optional[int]=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[int]=99 , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: int=512 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=4 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_attention_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_choices
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_attention_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ =True
UpperCAmelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( __lowercase , unittest.TestCase ):
_snake_case =True
_snake_case =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase_ =model_class_name.from_pretrained("roberta-base" , from_pt=_lowerCAmelCase )
UpperCAmelCase_ =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
| 54 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _A ( snake_case ) -> Dict:
_lowercase : str = torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
_lowercase : str = torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
_lowercase : str = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase__ )
_lowercase : Optional[Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowercase : Dict = sd.pop(lowercase__ )
_lowercase : Dict = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowercase : Union[str, Any] = sd[key]
# We split QKV in separate Q,K,V
_lowercase : List[Any] = key.replace(".qkv_proj." , ".q_proj." )
_lowercase : str = key.replace(".qkv_proj." , ".k_proj." )
_lowercase : int = key.replace(".qkv_proj." , ".v_proj." )
_lowercase : Union[str, Any] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` keeps its QKV weight separated as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowercase , _lowercase , _lowercase : Dict = torch.split(lowercase__ , depth // 3 , dim=0 )
_lowercase : List[str] = q
_lowercase : List[str] = k
_lowercase : str = v
del sd[key]
return sd
@torch.no_grad()
def _A ( snake_case , snake_case , snake_case=None ) -> Optional[Any]:
_lowercase : Any = load_checkpoint(lowercase__ )
if config is not None:
_lowercase : List[str] = OPTConfig.from_pretrained(lowercase__ )
else:
_lowercase : int = OPTConfig()
_lowercase : List[Any] = OPTModel(lowercase__ ).half().eval()
model.load_state_dict(lowercase__ )
# Check results
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
_snake_case = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
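# The renaming step above follows the usual checkpoint-surgery pattern: pop the
# old key and reinsert its tensor under the new name so no stale entries survive.
# A toy version with plain lists standing in for weight tensors:
state_dict = {"decoder.layer_norm.weight": [1.0], "decoder.version": [0.0]}
renames = {"decoder.layer_norm.weight": "decoder.final_layer_norm.weight"}
for old_key, new_key in renames.items():
    if old_key in state_dict:
        state_dict[new_key] = state_dict.pop(old_key)
state_dict.pop("decoder.version", None)  # drop weights the HF model does not use
print(sorted(state_dict))  # ['decoder.final_layer_norm.weight']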
| 245 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    '''simple docstring'''
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 54 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ) -> Tuple:
'''simple docstring'''
_lowercase : List[str] = list(lowercase__ )
_lowercase : Union[str, Any] = list(lowercase__ )
_lowercase : str = 0
for i in range(len(lowercase__ ) ):
if lista[i] != lista[i]:
count += 1
_lowercase : Tuple = '''_'''
if count > 1:
return False
else:
return "".join(lowercase__ )
def UpperCamelCase__ ( UpperCAmelCase_ ) -> Optional[Any]:
'''simple docstring'''
_lowercase : List[str] = []
while True:
_lowercase : Any = ['''$'''] * len(lowercase__ )
_lowercase : Tuple = []
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
_lowercase : Union[str, Any] = compare_string(binary[i] , binary[j] )
if k is False:
_lowercase : Any = '''*'''
_lowercase : Tuple = '''*'''
temp.append('''X''' )
for i in range(len(lowercase__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowercase__ ) == 0:
return pi
_lowercase : Tuple = list(set(lowercase__ ) )
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ) -> str:
'''simple docstring'''
_lowercase : int = []
for minterm in minterms:
_lowercase : Optional[int] = ''''''
for _ in range(lowercase__ ):
_lowercase : str = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowercase__ )
return temp
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = list(lowercase__ )
_lowercase : List[str] = list(lowercase__ )
_lowercase : Any = 0
for i in range(len(lowercase__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ) -> Tuple:
'''simple docstring'''
_lowercase : int = []
_lowercase : Optional[int] = [0] * len(lowercase__ )
for i in range(len(chart[0] ) ):
_lowercase : int = 0
_lowercase : List[Any] = -1
for j in range(len(lowercase__ ) ):
if chart[j][i] == 1:
count += 1
_lowercase : Dict = j
if count == 1:
_lowercase : List[str] = 1
for i in range(len(lowercase__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowercase__ ) ):
_lowercase : Optional[Any] = 0
temp.append(prime_implicants[i] )
while True:
_lowercase : List[Any] = 0
_lowercase : str = -1
_lowercase : Union[str, Any] = 0
for i in range(len(lowercase__ ) ):
_lowercase : List[str] = chart[i].count(1 )
if count_n > max_n:
_lowercase : str = count_n
_lowercase : List[str] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowercase__ ) ):
_lowercase : Dict = 0
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ) -> Optional[int]:
'''simple docstring'''
_lowercase : Union[str, Any] = [[0 for x in range(len(lowercase__ ) )] for x in range(len(lowercase__ ) )]
for i in range(len(lowercase__ ) ):
_lowercase : List[Any] = prime_implicants[i].count('''_''' )
for j in range(len(lowercase__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowercase__ ):
_lowercase : Tuple = 1
return chart
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = int(input('''Enter the no. of variables\n''' ) )
    _lowercase : Optional[Any] = [
        # Minterms must be ints so the generated binary strings contain only '0'/'1'.
        int(x)
        for x in input(
            '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
    ]
_lowercase : Optional[int] = decimal_to_binary(lowercase__ , lowercase__ )
_lowercase : Optional[int] = check(lowercase__ )
print('''Prime Implicants are:''' )
print(lowercase__ )
_lowercase : Optional[int] = prime_implicant_chart(lowercase__ , lowercase__ )
_lowercase : List[Any] = selection(lowercase__ , lowercase__ )
print('''Essential Prime Implicants are:''' )
print(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
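# A minimal sketch of the merge step the functions above implement: two minterm
# strings differing in exactly one bit combine into one implicant, with the
# differing bit replaced by '_'. The `_demo_` helper name is an illustrative
# assumption, not part of the algorithm above.
def _demo_merge_minterms():
    a, b = '''0110''', '''0100'''
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    assert len(diff) == 1
    assert a[: diff[0]] + '''_''' + a[diff[0] + 1 :] == '''01_0'''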
| 322 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowercase : Tuple =logging.getLogger(__name__)
__lowercase : Optional[int] =tf.data.AUTOTUNE
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowercase__ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowercase__ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowercase__ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowercase__ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowercase__ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowercase__ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowercase__ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowercase__ , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowercase__ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowercase__ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowercase__ , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowercase__ , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowercase__ , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowercase__ , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowercase__ , required=lowercase__ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowercase__ , help="Model ID to upload to on the Hugging Face Hub." )
UpperCAmelCase_ =parser.parse_args()
return args
def a__ ( lowercase__ ):
'''simple docstring'''
try:
if args.tpu_name:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(lowercase__ )
tf.tpu.experimental.initialize_tpu_system(lowercase__ )
return tpu
def count_samples(file_list):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(R"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)
    return num_samples
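# A minimal sketch of the shard-name convention count_samples parses:
# "<prefix>-<shard>-<num_samples>.tfrecord". The example filename is an
# illustrative assumption, not a file this script ships with.
def _demo_shard_count():
    assert int(re.search(R"-\d+-(\d+)\.tfrecord", "wikitext-00003-2048.tfrecord").group(1)) == 2048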
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =tf.data.Dataset.from_tensor_slices(lowercase__ )
if shuffle:
UpperCAmelCase_ =dataset.shuffle(len(lowercase__ ) )
UpperCAmelCase_ =tf.data.TFRecordDataset(lowercase__ , num_parallel_reads=lowercase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCAmelCase_ =dataset.apply(tf.data.experimental.assert_cardinality(lowercase__ ) )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
if shuffle:
assert shuffle_buffer_size is not None
UpperCAmelCase_ =dataset.shuffle(args.shuffle_buffer_size )
UpperCAmelCase_ =dataset.batch(lowercase__ , drop_remainder=lowercase__ )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
UpperCAmelCase_ =dataset.prefetch(lowercase__ )
return dataset
def a__ ( lowercase__ ):
'''simple docstring'''
if not args.no_tpu:
UpperCAmelCase_ =initialize_tpu(lowercase__ )
UpperCAmelCase_ =tf.distribute.TPUStrategy(lowercase__ )
else:
UpperCAmelCase_ =tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ =AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ =AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ =tokenizer.vocab_size
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ =steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ =TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ =create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["accuracy"] )
def decode_fn(lowercase__ ):
UpperCAmelCase_ ={
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ =DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="tf" )
def mask_with_collator(lowercase__ ):
# TF really needs an isin() function
UpperCAmelCase_ =(
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ =data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
UpperCAmelCase_ =args.per_replica_batch_size * strategy.num_replicas_in_sync
    UpperCAmelCase_ =prepare_dataset(
        lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    UpperCAmelCase_ =prepare_dataset(
        lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=False , )
UpperCAmelCase_ =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
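# A minimal, framework-free sketch of the special-tokens rule in mask_with_collator
# above: a position is protected from masking when it is padding
# (attention_mask == 0) or holds the CLS/SEP token. The toy ids below are
# illustrative assumptions.
def _demo_special_tokens_mask():
    cls_id, sep_id = 0, 2
    input_ids = [0, 5, 1]
    attention_mask = [1, 1, 0]
    protected = [(not m) or (t == cls_id) or (t == sep_id) for t, m in zip(input_ids, attention_mask)]
    assert protected == [True, False, True]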
| 54 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
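# A minimal sketch of the lazy-import behaviour _LazyModule provides: public names
# resolve to real imports only on first attribute access (PEP 562). This helper is
# an illustrative stand-in, not the actual _LazyModule implementation.
def _demo_lazy_getattr(name):
    import importlib

    lazy_map = {"CLIPSegProcessor": "transformers.models.clipseg.processing_clipseg"}
    if name in lazy_map:
        return getattr(importlib.import_module(lazy_map[name]), name)
    raise AttributeError(name)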
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ =[
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ) -> int:
'''simple docstring'''
UpperCAmelCase_ =vqa_pipeline(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] , )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
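# A minimal usage sketch matching the calls these tests exercise. Running it
# downloads the dandelin/vilt-b32-finetuned-vqa checkpoint, so it is illustrative
# rather than part of the test suite; the fixture path mirrors the one used above.
def _demo_vqa_usage():
    vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    return vqa_pipeline(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )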
| 54 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """simple docstring"""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """simple docstring"""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("""\n""")
    adjaceny_matrix = [line.split(""",""") for line in data]
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])
    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 68 |
def calc_profit(profit, weight, max_weight):
    '''simple docstring'''
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # remaining.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # The whole item fits, so the fraction taken is
            # weight[index] / weight[index] == 1: add the full profit.
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    print(
        """Input profits, weights, and then max_weight (all positive ints) separated by """
        """spaces."""
    )
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
    # Function Call
    calc_profit(profit, weight, max_weight)
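# A minimal worked example for calc_profit above (hand-computed): all of item 0
# fits (ratio 10/3), then the single remaining kg buys half of item 1 (ratio 5/2),
# so the gain is 10 + (4 - 3) / 2 * 5 = 12.5. The `_demo_` name is illustrative.
def _demo_fractional_knapsack():
    assert calc_profit([10, 5], [3, 2], 4) == 12.5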
| 54 | 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines):
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(R'''#.*''', '''''', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '''\n'''.join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''')
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
SCREAMING_SNAKE_CASE__ : Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE__ : Dict = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class __lowercase ( __lowercase , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def __a ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(A_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self : List[Any] ) -> int:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __a ( self : Dict , **__lowerCamelCase : int ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self : Tuple , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return ("This is a test", "This is a test")
def __a ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase = '''</s>'''
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self : int ) -> List[str]:
'''simple docstring'''
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(_lowerCAmelCase ) , 11_03 )
def __a ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def __a ( self : Any ) -> List[Any]:
'''simple docstring'''
lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowercase = rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
lowercase = py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowercase = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
lowercase = tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
lowercase = '''To ensure a smooth flow of bank resolutions.'''
lowercase = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
lowercase = tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __a ( self : str ) -> Any:
'''simple docstring'''
lowercase = ['''This is going to be way too long.''' * 1_50, '''short example''']
lowercase = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase = self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
lowercase = self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def __a ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( __lowercase , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def __a ( self : List[str] ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(A_ , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __a ( self : List[str] , **__lowerCamelCase : Tuple ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self : Optional[int] , __lowerCamelCase : Any ) -> List[str]:
'''simple docstring'''
return ("This is a test", "This is a test")
def __a ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowercase = rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
lowercase = py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@require_torch
def __a ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase = ['''This is going to be way too long.''' * 10_00, '''short example''']
lowercase = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase = self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
lowercase = self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
def __a ( self : List[Any] ) -> int:
'''simple docstring'''
lowercase = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowercase = self._large_tokenizer(_lowerCAmelCase ).input_ids
self.assertListEqual(
_lowerCAmelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
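# A minimal sketch of the id layout the asserts above pin down: sentencepiece ids
# are shifted up by `offset` (103 for the large checkpoint), the low ids are
# reserved for special tokens, and <unk> lands at offset + 2. Arithmetic only;
# the `_demo_` helper name is illustrative.
def _demo_pegasus_offset():
    offset = 103
    assert offset + 2 == 105  # unk_token_id, as asserted for the large tokenizer
    assert 96_103 - offset == 96_000  # implied underlying sentencepiece vocab size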
| 604 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(tokenizer_name, data_dir, max_source_length=1_0_2_4, max_target_length=1_0_2_4, consider_target=False, **kwargs):
    '''simple docstring'''
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=5_1_2, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
    fire.Fire(save_len_file)
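# A minimal sketch of the per-example length rule in get_lens above: an example's
# length is its count of non-pad tokens. Toy tensor with pad id 0; the `_demo_`
# helper name is illustrative.
def _demo_non_pad_lengths():
    import torch

    ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
    assert ids.ne(0).sum(1).tolist() == [2, 1]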
| 54 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
if b == 0:
return (1, 0)
((_UpperCamelCase), (_UpperCamelCase)) : List[Any] = extended_euclid(lowercase__ ,a % b )
_UpperCamelCase : int = a // b
return (y, x - k * y)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
((_UpperCamelCase), (_UpperCamelCase)) : Optional[int] = extended_euclid(lowercase__ ,lowercase__ )
_UpperCamelCase : Union[str, Any] = na * na
_UpperCamelCase : List[Any] = ra * x * na + ra * y * na
return (n % m + m) % m
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
((_UpperCamelCase), (_UpperCamelCase)) : int = extended_euclid(lowercase__ ,lowercase__ )
if b < 0:
_UpperCamelCase : Tuple = (b % n + n) % n
return b
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase : Tuple = invert_modulo(lowercase__ ,lowercase__ ), invert_modulo(lowercase__ ,lowercase__ )
_UpperCamelCase : Any = na * na
_UpperCamelCase : Tuple = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 624 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
def __init__( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: List[str]=30 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: Dict=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Tuple=32 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Dict=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Optional[int]=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image whose size differs from the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCAmelCase_ =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image whose size differs from the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =TFViTModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
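# A minimal sketch of the sequence-length formula the tester above relies on:
# (image_size // patch_size) ** 2 patches plus one [CLS] token. For the
# google/vit-base-patch16-224 checkpoint that is 14 * 14 + 1 = 197.
# The `_demo_` helper name is illustrative.
def _demo_vit_seq_length():
    image_size, patch_size = 224, 16
    assert (image_size // patch_size) ** 2 + 1 == 197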
| 54 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 359 |
from __future__ import annotations
def binary_search(a_list, item):
    '''simple docstring'''
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    sequence = [int(item.strip()) for item in user_input.split(""",""")]
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    not_str = """""" if binary_search(sequence, target) else """not """
    print(f"""{target} was {not_str}found in {sequence}""")
| 54 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : int = logging.get_logger(__name__)
class lowerCamelCase (__lowercase ):
lowerCamelCase__ : int = ['pixel_values']
def __init__( self : List[Any] , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : float = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[int, float] = 1 / 2_5_5 , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , **__UpperCAmelCase : Optional[int] , ) -> None:
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"""shortest_edge""": 3_8_4}
SCREAMING_SNAKE_CASE__ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
# Default value set here for backwards compatibility where the value in config is None
SCREAMING_SNAKE_CASE__ = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : float , __UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Any , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
SCREAMING_SNAKE_CASE__ = int(shortest_edge / crop_pct )
SCREAMING_SNAKE_CASE__ = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_lowerCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_lowerCAmelCase , size=(shortest_edge, shortest_edge) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[int, float] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : str , ) -> Optional[Any]:
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Dict , ) -> np.ndarray:
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : float = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : float = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCAmelCase : Optional[Any] , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = crop_pct if crop_pct is not None else self.crop_pct
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , crop_pct=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
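# A minimal, standalone sketch of the resize rule implemented above (the helper
# name below is illustrative, not part of the original class). Below 384 pixels
# the shortest edge is first scaled up by 1 / crop_pct, then the result is
# center-cropped back down; at 384 or larger the image is warped straight to a
# square with no crop.
def eval_resize_plan(shortest_edge: int, crop_pct: float) -> dict:
    if shortest_edge < 384:
        resize_shortest_edge = int(shortest_edge / crop_pct)
        return {"resize_shortest_edge": resize_shortest_edge, "crop": (shortest_edge, shortest_edge)}
    return {"resize_shortest_edge": None, "warp_to": (shortest_edge, shortest_edge)}

# The default crop_pct of 224 / 256 reproduces the classic "resize to 256,
# center-crop to 224" ImageNet evaluation recipe.
assert eval_resize_plan(224, 224 / 256)["resize_shortest_edge"] == 256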
| 196 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and derive the expected result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
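# A standalone illustration of the branchless lookup used in
# generate_random_hand above: (play >= oppo) + (play > oppo) evaluates to
# 0 for a loss, 1 for a tie and 2 for a win, indexing ["Loss", "Tie", "Win"].
for play, oppo, expected in [(1, 5, "Loss"), (3, 3, "Tie"), (7, 2, "Win")]:
    assert ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] == expected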
| 54 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1_0_2_4,
"""facebook/bart-large""": 1_0_2_4,
"""facebook/bart-large-mnli""": 1_0_2_4,
"""facebook/bart-large-cnn""": 1_0_2_4,
"""facebook/bart-large-xsum""": 1_0_2_4,
"""yjernite/bart_eli5""": 1_0_2_4,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
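# A quick sketch of the special-token layout the two methods above produce,
# using hypothetical token ids (<s> = 0, </s> = 2) for illustration only.
bos, eos = 0, 2
seq_a, seq_b = [100, 101], [200]
single = [bos] + seq_a + [eos]            # <s> A </s>
pair = single + [eos] + seq_b + [eos]     # <s> A </s> </s> B </s>
assert pair == [0, 100, 101, 2, 2, 200, 2]
# For BART the token type ids are all zeros, even for a pair of sequences.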
| 259 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self: List[Any] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , **_lowerCAmelCase: Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ =crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: float , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Any , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCAmelCase_ =size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ =int(shortest_edge / crop_pct )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_lowerCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_lowerCAmelCase , size=(shortest_edge, shortest_edge) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: str , ) -> Optional[Any]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase: Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , crop_pct=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 54 | 0 |
from __future__ import annotations
def binary_search(a_list, item):
    """Return True if item is in the sorted a_list, else False."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F'''{target} was {not_str}found in {sequence}''')
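# Design note: each recursive call above slices the list, which copies O(n)
# elements. An index-based variant with the same semantics avoids the copies;
# this is a sketch, not part of the original module.
def binary_search_by_index(a_list: list, item) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False


assert binary_search_by_index([1, 3, 5, 7], 5) is True
assert binary_search_by_index([1, 3, 5, 7], 4) is False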
| 429 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out() -> None:
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload,
        )
    def post(self) -> None:
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}


def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 54 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
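# A quick check of the segment-id layout produced above, with hypothetical ids
# [CLS] = 101 and [SEP] = 102: sentence A tokens (including [CLS] and the first
# [SEP]) get type 0, while sentence B tokens and the final [SEP] get type 1.
cls_ids, sep_ids = [101], [102]
seq_a, seq_b = [7, 8], [9]
type_ids = len(cls_ids + seq_a + sep_ids) * [0] + len(seq_b + sep_ids) * [1]
assert type_ids == [0, 0, 0, 0, 1, 1]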
| 245 |
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
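    # Worked example of the same bottom-up DP on a tiny input: 5 pence can be
    # made as {5}, {2, 2, 1}, {2, 1, 1, 1} or {1, 1, 1, 1, 1}, so the table
    # builds up to 4 ways.
    assert solution(5) == 4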
| 54 | 0 |
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
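    # Numeric check of the mass-action law n * p = n_i**2 that the function
    # inverts: with n = 25 and n_i = 10 the missing hole concentration is
    # 10**2 / 25 = 4.
    assert carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10) == ("hole_conc", 4)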
| 322 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
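    # Worked instance of the recurrence matrix[a][b] = min over c of
    # matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]:
    # for array = [10, 20, 30] (a 10x20 matrix times a 20x30 matrix) the single
    # possible split costs 10 * 20 * 30 = 6000 scalar multiplications.
    m, _ = matrix_chain_order([10, 20, 30])
    assert m[1][2] == 10 * 20 * 30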
| 54 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 353 |
from math import log2


def lowest_set_bit(a: int) -> int:
    """Return the zero-based index of the lowest set bit of ``a``."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be an 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
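    # Why a & -a works: two's-complement negation flips all bits above the
    # lowest set bit, so the AND isolates exactly that bit, and log2 of the
    # resulting power of two is its zero-based index.
    for value, index in [(1, 0), (4, 2), (12, 2), (36, 2), (160, 5)]:
        assert value & -value == 2**index
        assert lowest_set_bit(value) == index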
| 54 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _a ( self : Any ) -> Any:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : int ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
# warmup pass to apply optimizations
__UpperCAmelCase =pipe(**self.get_dummy_inputs() )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : List[str] ) -> Optional[int]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : str ) -> Optional[int]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Optional[int] ) -> str:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =ort.SessionOptions()
__UpperCAmelCase =False
return options
def _a ( self : Any ) -> List[Any]:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
# using the PNDM scheduler by default
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
__UpperCAmelCase =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowerCAmelCase , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 68 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase_ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase__ )
UpperCAmelCase_ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase_ =sd.pop(lowercase__ )
UpperCAmelCase_ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase_ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase_ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase_ =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =torch.split(lowercase__ , depth // 3 , dim=0 )
UpperCAmelCase_ =q
UpperCAmelCase_ =k
UpperCAmelCase_ =v
del sd[key]
return sd
@torch.no_grad()
def a__ ( lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =load_checkpoint(lowercase__ )
if config is not None:
UpperCAmelCase_ =OPTConfig.from_pretrained(lowercase__ )
else:
UpperCAmelCase_ =OPTConfig()
UpperCAmelCase_ =OPTModel(lowercase__ ).half().eval()
model.load_state_dict(lowercase__ )
# Check results
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__lowercase : str =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 54 | 0 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase_ :
@staticmethod
def A ( *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0053},
] , )
| 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
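# A quick sketch of the round trip these helpers perform (torch is assumed to
# be available; this module does not import it itself):
#
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1   # fake model output in [-1, 1]
#   pils = pt_to_pil(batch)                    # -> list of two 64x64 PIL images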
| 54 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
A_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
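# Example invocation (the .pt path below is hypothetical):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers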
| 604 |
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers, i.e. numbers of the form 2^i * 3^j * 5^k."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
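# Sanity check for the generator above; these values follow directly from the
# 2^i * 3^j * 5^k definition:
#
#   hamming(10)  # -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]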
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
__lowercase : Union[str, Any] =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp

FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
_UpperCamelCase : Optional[Any] = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase,  # the expected-encoding dict defined above
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 624 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
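# The shim above exists only for backwards compatibility; new code should do
# something like the following instead (the checkpoint id is illustrative):
#
#   from transformers import GLPNImageProcessor
#   image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")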
| 54 | 0 |
"""simple docstring"""
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = 0
lowercase = 0
lowercase = {}
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if vertex not in self.adjacency:
lowercase = {}
self.num_vertices += 1
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
self.add_vertex(_lowerCAmelCase )
self.add_vertex(_lowerCAmelCase )
if head == tail:
return
lowercase = weight
lowercase = weight
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_edges()
for edge in edges:
lowercase , lowercase , lowercase = edge
edges.remove((tail, head, weight) )
for i in range(len(_lowerCAmelCase ) ):
lowercase = list(edges[i] )
edges.sort(key=lambda __lowerCAmelCase : e[2] )
for i in range(len(_lowerCAmelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase = edges[i][2] + 1
for edge in edges:
lowercase , lowercase , lowercase = edge
lowercase = weight
lowercase = weight
def __str__( self ):
"""simple docstring"""
lowercase = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase = self.adjacency[head][tail]
string += f'{head} -> {tail} == {weight}\n'
return string.rstrip("""\n""" )
def A__ ( self ):
"""simple docstring"""
lowercase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def A__ ( self ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def A__ ( __lowerCAmelCase=None , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = Graph()
if vertices is None:
lowercase = []
if edges is None:
lowercase = []
for vertex in vertices:
g.add_vertex(_lowerCAmelCase )
for edge in edges:
g.add_edge(*_lowerCAmelCase )
return g
class UnionFind:
    """Disjoint-set structure with union by rank and path compression."""

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)

        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)

        if root1 == root2:
            return root1

        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1

        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2

        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None


def boruvka_mst(graph):
    """Implementation of Boruvka's algorithm; returns the MST of the given graph."""
    num_components = graph.num_vertices

    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1

        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]

                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
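# A small smoke test of the MST routine above; the 4-vertex cycle and its
# expected tree weight are made up for the example:
#
#   g = Graph.build(edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
#   mst = boruvka_mst(g)
#   print(mst)  # three edges of total weight 6; the weight-4 edge is dropped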
| 359 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
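        # CANINE tokenizes straight to Unicode code points: 57344 (0xE000) and
        # 57345 (0xE001) are the private-use code points reserved for [CLS] and
        # [SEP], the 32s are spaces, and the trailing zeros are padding.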
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
@require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
@require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
@require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
@require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    # ("b" and "B" for example have different Unicode code points)
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
| 54 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
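# With fire, the function doubles as a CLI; a hypothetical invocation
# (tokenizer name and data directory are placeholders):
#
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro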
| 196 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
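# A quick sanity check of the underlying scorer this metric wraps; nltk is
# already a dependency of this module. A perfect hypothesis scores 1.0:
#
#   from nltk.translate import gleu_score
#   gleu_score.corpus_gleu([[["the", "cat", "sat"]]], [["the", "cat", "sat"]])  # -> 1.0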
| 54 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
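# Note on the call convention exercised by the tests below: the Flax logits
# processors and warpers are plain callables invoked as
# processor(input_ids, scores, cur_len), returning a new array of scores with
# the same (batch_size, vocab_size) shape.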
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
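
# Standalone usage sketch (not from the original test suite): chaining warpers with
# FlaxLogitsProcessorList is exactly equivalent to calling them in sequence, which is
# what the two tests above verify. Shapes and logit values here are illustrative.
if __name__ == "__main__":
    dummy_ids = jnp.zeros((1, 4), dtype="i4")
    logits = jnp.array([[4.0, 3.0, 2.0, 1.0, 0.0]])
    chain = FlaxLogitsProcessorList([FlaxTopKLogitsWarper(3), FlaxTopPLogitsWarper(0.9)])
    print(chain(dummy_ids, logits, cur_len=4))  # entries outside the kept set become -inf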
| 259 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
        return 32
@property
    def time_input_dim(self):
        return 32
@property
    def block_out_channels_a(self):
        return self.time_input_dim
@property
    def time_embed_dim(self):
        return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
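
# Note on the `strength` argument used above (illustrative helper, not from the
# original file): for img2img pipelines the init image is noised to an intermediate
# timestep first, so only roughly strength * num_inference_steps denoising steps
# actually run. This mirrors the get_timesteps bookkeeping used by diffusers
# img2img pipelines.
def _effective_img2img_steps(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start  # steps actually executed


assert _effective_img2img_steps(100, 0.2) == 20  # matches the slow test's settings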
| 54 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
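
# Migration sketch (illustrative, not from the original file): the shim above is a
# drop-in alias, so swapping the class name is the whole migration.
#   old = VideoMAEFeatureExtractor()   # still works, emits the FutureWarning above
#   new = VideoMAEImageProcessor()     # preferred going forward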
| 429 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
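
# Note: Flax models accept plain numpy arrays directly, and from_pt=True converts the
# PyTorch "roberta-base" checkpoint to Flax parameters at load time, e.g.
#   model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
# (network access assumed on first run).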
| 54 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class a__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
_lowercase : Dict = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1E-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1E-0_5, "token": 25506, "token_str": " accuser"},
] , )
_lowercase : str = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1E-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1E-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_lowercase : Union[str, Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2E-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2E-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9E-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
_lowercase : Any = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2E-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2E-0_5, "token": 16416, "token_str": "ELS"},
] , )
_lowercase : List[str] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2E-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2E-0_5, "token": 16416, "token_str": "ELS"},
] , )
_lowercase : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1E-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2E-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2E-0_5, "token": 13606, "token_str": " Clara"},
] , )
_lowercase : Optional[Any] = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
[
{
"score": 2.2E-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2E-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2E-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2E-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")

        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")

        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
_lowercase : int = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"sequence": "My name is John", "score": 0.0_0_8, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_0_7, "token": 1573, "token_str": " Chris"},
] , )
_lowercase : Optional[int] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_5_1,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_1_4,
"token": 12790,
"token_str": " Lyon",
},
] , )
_lowercase : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"sequence": "My name is Patrick", "score": 0.0_0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_0_0, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_0_0, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
_lowercase : Optional[Any] = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowercase : Tuple = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowercase : Union[str, Any] = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_lowerCAmelCase , [
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
] , )
with self.assertRaises(_lowerCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowerCAmelCase ):
fill_masker("This is" )
self.run_test_top_k(_lowerCAmelCase , _lowerCAmelCase )
self.run_test_targets(_lowerCAmelCase , _lowerCAmelCase )
self.run_test_top_k_targets(_lowerCAmelCase , _lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowerCAmelCase , _lowerCAmelCase )
self.fill_mask_with_multiple_masks(_lowerCAmelCase , _lowerCAmelCase )
    def run_test_targets(self, model, tokenizer):
"""simple docstring"""
_lowercase : Any = tokenizer.get_vocab()
_lowercase : Any = sorted(vocab.keys() )[:2]
# Pipeline argument
_lowercase : Any = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , targets=_lowerCAmelCase )
_lowercase : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowercase : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowerCAmelCase )
_lowercase : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowerCAmelCase ) )
# Call argument
_lowercase : List[Any] = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowercase : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowercase : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowerCAmelCase )
_lowercase : str = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowerCAmelCase ) )
# Score equivalence
_lowercase : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCAmelCase )
_lowercase : List[str] = [top_mask["token_str"] for top_mask in outputs]
_lowercase : Any = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase ) == set(_lowerCAmelCase ):
_lowercase : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCAmelCase )
_lowercase : Optional[Any] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(_lowerCAmelCase ):
_lowercase : Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowerCAmelCase ):
_lowercase : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""] )
with self.assertRaises(_lowerCAmelCase ):
_lowercase : Any = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="" )
    def run_test_top_k(self, model, tokenizer):
"""simple docstring"""
_lowercase : str = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , top_k=2 )
_lowercase : Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowercase : Dict = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowercase : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
    def run_test_top_k_targets(self, model, tokenizer):
"""simple docstring"""
_lowercase : Any = tokenizer.get_vocab()
_lowercase : Tuple = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# top_k=2, ntargets=3
_lowercase : List[str] = sorted(vocab.keys() )[:3]
_lowercase : Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_lowerCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        _lowercase : List[str] = [el["token_str"] for el in sorted(_lowerCAmelCase, key=lambda x: x["score"], reverse=True)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase ).issubset(_lowerCAmelCase ):
_lowercase : str = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
"""simple docstring"""
_lowercase : Optional[Any] = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowercase : Tuple = tokenizer.get_vocab()
# String duplicates + id duplicates
_lowercase : str = sorted(vocab.keys() )[:3]
_lowercase : List[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_lowercase : str = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=_lowerCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_lowerCAmelCase ) , 3 )
    def fill_mask_with_multiple_masks(self, model, tokenizer):
"""simple docstring"""
_lowercase : Optional[int] = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowercase : List[str] = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
] , )
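
# Standalone sketch of the behavior exercised above (the model name is the same tiny
# test checkpoint used by the fast tests; network access assumed on first run):
if __name__ == "__main__":
    unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    print(unmasker("My name is <mask>"))    # a single list of top_k result dicts
    print(unmasker("My <mask> is <mask>"))  # nested: one list of results per mask token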
| 245 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclid's algorithm: returns (x, y) such that a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Solve n ≡ r1 (mod n1) and n ≡ r2 (mod n2) for coprime moduli n1, n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Return the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 7)
    4
    >>> invert_modulo(5, 7)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same solution as chinese_remainder_theorem, computed via modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
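
# Worked example beyond the doctests: x ≡ 1 (mod 5) and x ≡ 2 (mod 7) has the
# unique solution x = 16 in [0, 35); both constructions agree on it.
if __name__ == "__main__":
    assert chinese_remainder_theorem(5, 1, 7, 2) == 16 == chinese_remainder_theorem2(5, 1, 7, 2)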
| 54 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCamelCase__ = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
UpperCamelCase__ = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
UpperCamelCase__ = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
UpperCamelCase__ = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
UpperCamelCase__ = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k: str, patterns) -> str:
    # Apply each (tf_name -> hf_name) substitution in order; later patterns see
    # the result of earlier ones.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
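
# Worked example (the TF key shape is illustrative): with DECODER_PATTERNS above,
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
# is rewritten step by step ("/"->".", "layer_"->"layers.", "kernel"->"weight",
# "pegasus"->"model", "attention.self"->"self_attn", "query"->"q_proj") into
#   "model.decoder.layers.0.self_attn.q_proj.weight"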
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
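
# Why the transposes above: TF stores dense kernels as (in_features, out_features),
# while torch.nn.Linear.weight is (out_features, in_features), so every
# dense/query/key/value kernel must be flipped before loading.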
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
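
# Example invocation (script name and paths are placeholders):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt/model.ckpt \
#       --save_dir ./bigbird-pegasus-converted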
| 322 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local",
    )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
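
# Shard-name convention assumed by count_samples: files are named like
# "<prefix>-<shard_idx>-<num_samples>.tfrecord" (as written by
# prepare_tfrecord_shards.py), so the trailing integer is the per-shard sample
# count. Illustrative filenames:
#   count_samples(["wiki-00000-52000.tfrecord", "wiki-00001-48000.tfrecord"]) == 100000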
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTOTUNE)
    dataset = dataset.prefetch(AUTOTUNE)
    return dataset
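
# Design note: mask_fn is mapped *after* batching, so masking runs once per
# produced batch — every epoch draws a fresh random mask over the same text,
# rather than baking a single masking into the stored dataset.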
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 54 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Dict = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
    parser.add_argument(
        '''--pretrained_model_config''' , type=str , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
    parser.add_argument(
        '''--tokenizer''' , type=str , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
    parser.add_argument(
        '''--per_replica_batch_size''' , type=int , default=8 , help='''Batch size per TPU core.''' , )
    parser.add_argument(
        '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
    parser.add_argument(
        '''--tpu_name''' , type=str , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
    parser.add_argument(
        '''--tpu_zone''' , type=str , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
    parser.add_argument(
        '''--gcp_project''' , type=str , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
    parser.add_argument(
        '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
    parser.add_argument(
        '''--train_dataset''' , type=str , help='''Path to training dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    parser.add_argument(
        '''--shuffle_buffer_size''' , type=int , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
    parser.add_argument(
        '''--eval_dataset''' , type=str , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of epochs to train for.''' , )
    parser.add_argument(
        '''--learning_rate''' , type=float , default=1e-4 , help='''Learning rate to use for training.''' , )
    parser.add_argument(
        '''--weight_decay_rate''' , type=float , default=1e-3 , help='''Weight decay rate to use for training.''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
    parser.add_argument(
        '''--mlm_probability''' , type=float , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
    parser.add_argument('''--output_dir''' , type=str , required=True , help='''Path to save model checkpoints to.''' )
    parser.add_argument('''--hub_model_id''' , type=str , help='''Model ID to upload to on the Hugging Face Hub.''' )
    args = parser.parse_args()
    return args
def initialize_tpu( args ):
    """Connect to and initialize the TPU system, returning the cluster resolver."""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
            '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples( file_list ):
    """Sum the per-shard sample counts encoded in the .tfrecord filenames."""
    num_samples = 0
    for file in file_list:
        filename = file.split('''/''' )[-1]
        sample_count = re.search(R'''-\d+-(\d+)\.tfrecord''' , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
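# A minimal sketch of the shard naming count_samples assumes (inferred from the
# regex above): the final number in each filename is that shard's sample count.
#   count_samples(["train-00000-1000.tfrecord", "train-00001-512.tfrecord"])  # -> 1512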
def prepare_dataset( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    """Build the tf.data input pipeline: read shards, decode, shuffle, batch, mask."""
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTOTUNE )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTOTUNE )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTOTUNE )
    dataset = dataset.prefetch(AUTOTUNE )
    return dataset
def main( args ):
    """Train a masked language model on TPU (or on a single GPU with --no_tpu)."""
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
    def decode_fn(example ):
        features = {
            '''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['''attention_mask'''] , tf.bool )
            | (batch['''input_ids'''] == tokenizer.cls_token_id)
            | (batch['''input_ids'''] == tokenizer.sep_token_id)
        )
        batch['''input_ids'''] , batch['''labels'''] = data_collator.tf_mask_tokens(
            batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
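    # If more special ids ever need excluding, the chained `|` above could be
    # replaced by a broadcasted comparison (hypothetical variant, not used here):
    #   special = tf.constant([tokenizer.cls_token_id, tokenizer.sep_token_id])
    #   mask = tf.reduce_any(batch["input_ids"][..., None] == special, axis=-1)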
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
        vqa_pipeline =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        examples =[
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ) -> int:
'''simple docstring'''
UpperCAmelCase_ =vqa_pipeline(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] , )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
        vqa_pipeline =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        image ="./tests/fixtures/tests_samples/COCO/000000039769.png"
        question ="How many cats are there?"
        outputs =vqa_pipeline(image=image , question="How many cats are there?" , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
        outputs =vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
        vqa_pipeline =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
        image ="./tests/fixtures/tests_samples/COCO/000000039769.png"
        question ="How many cats are there?"
        outputs =vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
        outputs =vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
        outputs =vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
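# Usage pattern exercised by the tests above (the model id and fixture path are
# the ones used in the slow test):
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)
#   # -> [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]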
| 54 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = StableDiffusionInstructPixaPixPipeline
lowerCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self : Tuple ) -> int:
torch.manual_seed(0 )
__UpperCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__UpperCAmelCase =PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
torch.manual_seed(0 )
__UpperCAmelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__UpperCAmelCase =CLIPTextModel(_lowerCAmelCase )
__UpperCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__UpperCAmelCase ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> Optional[int]:
__UpperCAmelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__UpperCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase =Image.fromarray(np.uint8(_lowerCAmelCase ) ).convert("""RGB""" )
if str(_lowerCAmelCase ).startswith("""mps""" ):
__UpperCAmelCase =torch.manual_seed(_lowerCAmelCase )
else:
__UpperCAmelCase =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__UpperCAmelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[int] ) -> str:
__UpperCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase =self.get_dummy_components()
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
__UpperCAmelCase =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs(_lowerCAmelCase )
__UpperCAmelCase =sd_pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Any ) -> Union[str, Any]:
__UpperCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase =self.get_dummy_components()
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
__UpperCAmelCase =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs(_lowerCAmelCase )
__UpperCAmelCase ="""french fries"""
__UpperCAmelCase =sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
__UpperCAmelCase =output.images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase =self.get_dummy_components()
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
__UpperCAmelCase =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs(_lowerCAmelCase )
__UpperCAmelCase =[inputs["""prompt"""]] * 2
        __UpperCAmelCase =np.array(inputs["""image"""] ).astype(np.float32 ) / 255.0
__UpperCAmelCase =torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
__UpperCAmelCase =image / 2 + 0.5
__UpperCAmelCase =image.permute(0 , 3 , 1 , 2 )
__UpperCAmelCase =image.repeat(2 , 1 , 1 , 1 )
__UpperCAmelCase =sd_pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__UpperCAmelCase =np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Union[str, Any] ) -> str:
__UpperCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase =self.get_dummy_components()
__UpperCAmelCase =EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
__UpperCAmelCase =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =self.get_dummy_inputs(_lowerCAmelCase )
__UpperCAmelCase =sd_pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
__UpperCAmelCase =[round(_lowerCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(_lowerCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Optional[int] ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _a ( self : str ) -> Dict:
__UpperCAmelCase =self.get_dummy_components()
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
__UpperCAmelCase =VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
__UpperCAmelCase =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__UpperCAmelCase =pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="""pt""" ) )[0]
__UpperCAmelCase =components["""vae"""]
__UpperCAmelCase =self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__UpperCAmelCase =vae.encode(inputs[image_param] ).latent_dist.mode()
__UpperCAmelCase =pipe(**_lowerCAmelCase )[0]
__UpperCAmelCase =np.abs(out - out_latents_inputs ).max()
        self.assertLess(_lowerCAmelCase , 1e-4 , """passing latents as image input generates a different result from passing an image""" )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> Tuple:
__UpperCAmelCase =torch.manual_seed(_lowerCAmelCase )
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__UpperCAmelCase ={
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[Any] ) -> int:
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase =self.get_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase =np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase )
__UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase =self.get_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase =np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _a ( self : Dict ) -> str:
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase )
__UpperCAmelCase =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase =self.get_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase =np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _a ( self : Dict ) -> Optional[int]:
        number_of_steps =0
        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
                latents =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice =latents[0, -3:, -3:, -1]
                expected_slice =np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
                latents =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice =latents[0, -3:, -3:, -1]
                expected_slice =np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called =False
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
__UpperCAmelCase =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase =self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
__UpperCAmelCase =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase =self.get_inputs()
__UpperCAmelCase =pipe(**_lowerCAmelCase )
__UpperCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _a ( self : Dict ) -> str:
__UpperCAmelCase =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCAmelCase =inputs["""image"""].resize((504, 504) )
__UpperCAmelCase ="""timbrooks/instruct-pix2pix"""
__UpperCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase =pipe(**_lowerCAmelCase )
__UpperCAmelCase =output.images[0]
__UpperCAmelCase =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__UpperCAmelCase =np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
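# Usage pattern exercised by the slow tests above (model id and arguments come
# from the tests themselves; the class name keeps this file's spelling):
#   pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", safety_checker=None)
#   out = pipe(prompt="turn him into a cyborg", image=image,
#              guidance_scale=7.5, image_guidance_scale=1.0)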
| 68 |
def calc_profit( profit , weight , max_weight ):
    '''Fractional knapsack: maximise the profit that fits into max_weight kg.'''
    if len(profit ) != len(weight ):
        raise ValueError("The length of profit and weight must be the same." )
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit can not be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight can not be negative." )
    # List created to store the profit gained per 1 kg of each item
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight =[p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight =sorted(profit_by_weight )
    # declaring useful variables
    length =len(sorted_profit_by_weight )
    limit =0
    gain =0
    i =0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # greatest profit/weight ratio not taken yet
        biggest_profit_by_weight =sorted_profit_by_weight[length - i - 1]
        index =profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] =-1  # mark this item as used
        # check if the item's whole weight fits into the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding the profit gained for the whole item:
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item is heavier than the remaining capacity, so take only the
            # fraction that fits: (remaining weight) / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
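# Worked example: profit = [10, 9, 8], weight = [3, 4, 5], max_weight = 5.
# profit/weight = [3.33, 2.25, 1.6], so item 0 is taken whole (3 kg, gain 10)
# and the remaining 2 kg take 2/4 of item 1 (gain 4.5): calc_profit(...) == 14.5.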
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit =[int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight =[int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight =int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 54 | 0 |
from math import sqrt
def sum_of_divisors( n ):
    """Return the sum of the proper divisors of n."""
    total =0
    for i in range(1, int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
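# Example: the proper divisors of 220 sum to 284 and those of 284 sum to 220,
# so sum_of_divisors(sum_of_divisors(220)) == 220 while sum_of_divisors(220) != 220;
# this is exactly the amicable-number test applied in solution() below.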
def solution( n = 1_0_0_0_0 ):
    """Return the sum of all amicable numbers below n (Project Euler problem 21)."""
    total =sum(
        i
        for i in range(1, n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip"""] =[
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blip"""] =[
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowercase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
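# With _LazyModule, `import transformers.models.blip` stays cheap: the heavy
# torch/tf submodules listed in _import_structure are only imported the first
# time one of their attributes (e.g. BlipModel) is actually accessed.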
| 54 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
def __init__( self : Tuple , __lowerCamelCase : UNetaDModel , __lowerCamelCase : ScoreSdeVeScheduler ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
@torch.no_grad()
def __call__( self : int , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 20_00 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , **__lowerCamelCase : Tuple , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
lowercase = self.unet.config.sample_size
lowercase = (batch_size, 3, img_size, img_size)
lowercase = self.unet
lowercase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase ) * self.scheduler.init_noise_sigma
lowercase = sample.to(self.device )
self.scheduler.set_timesteps(_lowerCAmelCase )
self.scheduler.set_sigmas(_lowerCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase = self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample
lowercase = self.scheduler.step_correct(_lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
# prediction step
lowercase = model(_lowerCAmelCase , _lowerCAmelCase ).sample
lowercase = self.scheduler.step_pred(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_lowerCAmelCase )
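# The __call__ above is predictor-corrector (PC) sampling: at each timestep,
# `correct_steps` Langevin corrector updates (scheduler.step_correct) run
# before one reverse-SDE predictor update (scheduler.step_pred), and the
# returned image is the clamped *mean* of the final prediction, not the noisy
# sample itself.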
| 604 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1_0_2_4 , max_target_length=1_0_2_4 , consider_target=False , **kwargs ):
    '''Save the max(src_len, tgt_len) of each example to allow dynamic batching.'''
    tok =AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds =SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad =tok.pad_token_id
    def get_lens(ds ):
        dl =tqdm(
            DataLoader(ds , batch_size=5_1_2 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens =[]
        for batch in dl:
            src_lens =batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens =batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens =get_lens(train_ds )
    val_ds =SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens =get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
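# Example invocation via fire (a sketch; the tokenizer id and data dir are
# placeholders, not values from this repo):
#   python save_len_file.py facebook/bart-base ./data_dir --consider_target=False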
| 54 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : List[str] ) -> int:
_UpperCamelCase : Tuple = False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self : Tuple , __a : str=None , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : Optional[int] = 2048 , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : int , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCamelCase : Optional[int] = self.tokenizer
_UpperCamelCase : Any = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCamelCase : str = self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , **_lowerCAmelCase )
else:
# add pixel_values and bbox
_UpperCamelCase : Union[str, Any] = self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , header_text=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_UpperCamelCase : Optional[int] = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
if "attention_mask" in text_encoding:
_UpperCamelCase : Tuple = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
_UpperCamelCase : Optional[Any] = text_encoding.pop("input_ids" )
else:
_UpperCamelCase : Any = None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , *__a : Any , **__a : int ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : str , *__a : List[str] , **__a : List[str] ) -> int:
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : Any = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
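# Usage sketch (the checkpoint id is an assumption, not taken from this file):
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   enc = processor(images=image, text="What is shown?", return_tensors="pt")
# Non-VQA checkpoints tokenize `text` separately (first branch above), while
# VQA checkpoints render it into the image via `header_text` instead.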
| 624 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester :
def __init__( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: List[str]=30 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: Dict=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Tuple=32 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Dict=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Optional[int]=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
        pixel_values =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels =None
        if self.use_labels:
            labels =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCAmelCase_ =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
        self.model_tester =TFViTModelTester(self )
        self.config_tester =ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
| 54 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = torch.ones([0] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowercase = 77
        init_image = self.dummy_image.to(_lowerCAmelCase )
        init_image = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
lowercase = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
lowercase = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_lowerCAmelCase , )
lowercase = output.images
lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
lowercase = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowercase = 77
lowercase = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
lowercase = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.manual_seed(0 )
lowercase = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type="""np""" , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
lowercase = """BAAI/AltDiffusion"""
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = """A fantasy landscape, trending on artstation"""
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type="""np""" , )
lowercase = output.images[0]
lowercase = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((768, 512) )
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
lowercase = """BAAI/AltDiffusion"""
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = """A fantasy landscape, trending on artstation"""
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 359 |
from __future__ import annotations
def binary_search( a_list , item ):
    '''Recursive binary search: return True if item occurs in the sorted a_list.'''
    if len(a_list ) == 0:
        return False
    midpoint =len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
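# Example: binary_search([1, 3, 5, 7, 9], 7) compares 7 with the midpoint value
# 5, recurses on the right half [7, 9], then on [7], and returns True; an empty
# slice returns False, so absent items terminate cleanly.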
if __name__ == "__main__":
    user_input =input("""Enter numbers separated by comma:\n""").strip()
    sequence =[int(item.strip()) for item in user_input.split(""",""")]
    target =int(input("""Enter the number to be found in the list:\n""").strip())
    not_str ="""""" if binary_search(sequence, target) else """not """
print(f"""{target} was {not_str}found in {sequence}""")
| 54 | 0 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n , prec=10_00 ):
    '''Miller-Rabin probabilistic primality test with `prec` random rounds.'''
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp) with d odd
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
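# Example: is_prime_big(97) returns True. For composite n, each random base is
# a Miller-Rabin witness with probability at least 3/4, so the chance of a
# false "prime" verdict after `prec` rounds is at most 4**(-prec).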
if __name__ == "__main__":
A_ : str = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 196 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowercase : Any =(
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowercase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    """Generate a random hand, an opposing hand, and the expected outcome."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
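
# The index trick above maps the pair (play, oppo) to an outcome:
# (play >= oppo) + (play > oppo) evaluates to 0, 1, or 2, selecting "Loss",
# "Tie", or "Win" respectively, because SORTED_HANDS is ordered weakest to
# strongest.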
def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))

@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected

@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected

@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values

@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected

@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected

@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected

@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected

def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]

def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"

def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values

def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 54 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass

@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 259 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
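
# A minimal usage sketch (the input image and sizes below are illustrative
# assumptions, not part of the original module):
# processor = ConvNextImageProcessor(size={"shortest_edge": 224})
# inputs = processor(images=PIL.Image.new("RGB", (640, 480)), return_tensors="np")
# inputs["pixel_values"].shape  # -> (1, 3, 224, 224): resize + center crop at 224 < 384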
| 54 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class __snake_case (__lowercase ):
lowerCAmelCase__ = "bridgetower_vision_model"
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=288 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[Any]=1E-05 , _UpperCAmelCase : int=False , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[str]=False , **_UpperCAmelCase : Tuple , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : str = initializer_factor
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = stop_gradient
_lowerCAmelCase : Dict = share_layernorm
_lowerCAmelCase : Optional[Any] = remove_last_layer
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Dict ) -> "PretrainedConfig":
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowerCAmelCase : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class __snake_case (__lowercase ):
lowerCAmelCase__ = "bridgetower_text_model"
def __init__( self : List[str] , _UpperCAmelCase : List[str]=5_0265 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : List[str]=3072 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : int=514 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : Optional[Any]=1E-05 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Optional[int]=0 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : int="absolute" , _UpperCAmelCase : Tuple=True , **_UpperCAmelCase : Tuple , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Optional[int] = initializer_factor
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : str = use_cache
_lowerCAmelCase : str = pad_token_id
_lowerCAmelCase : List[str] = bos_token_id
_lowerCAmelCase : Tuple = eos_token_id
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : str ) -> "PretrainedConfig":
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Tuple = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowerCAmelCase : Tuple = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class __snake_case (__lowercase ):
lowerCAmelCase__ = "bridgetower"
def __init__( self : List[Any] , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : Optional[int]=1E-05 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Union[str, Any]="add" , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Dict , ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop("""text_config_dict""" , _lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = kwargs.pop("""vision_config_dict""" , _lowerCAmelCase )
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase : List[Any] = share_cross_modal_transformer_layers
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : int = initializer_factor
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = share_link_tower_layers
_lowerCAmelCase : List[str] = link_tower_type
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = tie_word_embeddings
_lowerCAmelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
_lowerCAmelCase : List[str] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_lowerCAmelCase : Optional[int] = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_lowerCAmelCase : List[str] = BridgeTowerTextConfig(**_lowerCAmelCase )
_lowerCAmelCase : int = BridgeTowerVisionConfig(**_lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , _UpperCAmelCase : BridgeTowerTextConfig , _UpperCAmelCase : BridgeTowerVisionConfig , **_UpperCAmelCase : int ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[str] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : Union[str, Any] = self.text_config.to_dict()
_lowerCAmelCase : List[str] = self.vision_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 429 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
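
# Illustrative behaviour (the sample string below is an assumption mirroring
# pytest's summary format, not taken from this repo):
# handle_test_results("10 failed, 90 passed in 1:02:13") == (10, 90, "1:02:13")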
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict) -> None:
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)

    @staticmethod
    def error_out() -> None:
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload
        )

    def post(self) -> None:
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text
        )

    def get_reply_blocks(self, job_name, job_link, failures, text) -> list:
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self) -> None:
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 54 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 245 |
def solution(pence: int = 200) -> int:
    """Count the ways to make ``pence`` using standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
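
# Worked mini-example (illustrative): for pence = 5 and coins {1, 2, 5} the
# table fills to number_of_ways = [1, 1, 2, 2, 3, 4], i.e. four ways to make
# 5p: 5, 2+2+1, 2+1+1+1, and 1+1+1+1+1.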
if __name__ == "__main__":
    assert solution(200) == 73682
| 54 | 0 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ ( __lowercase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_lowercase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , '''num_heads''' ) )
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : str=13 , UpperCamelCase : List[Any]=64 , UpperCamelCase : str=3 , UpperCamelCase : str=[16, 48, 96] , UpperCamelCase : Union[str, Any]=[1, 3, 6] , UpperCamelCase : Union[str, Any]=[1, 2, 10] , UpperCamelCase : List[Any]=[7, 3, 3] , UpperCamelCase : Dict=[4, 2, 2] , UpperCamelCase : Optional[int]=[2, 1, 1] , UpperCamelCase : List[str]=[2, 2, 2] , UpperCamelCase : Optional[int]=[False, False, True] , UpperCamelCase : Any=[0.0, 0.0, 0.0] , UpperCamelCase : str=0.02 , UpperCamelCase : Any=1E-12 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[Any]=2 , ):
"""simple docstring"""
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : List[str] = image_size
_lowercase : Union[str, Any] = patch_sizes
_lowercase : List[Any] = patch_stride
_lowercase : List[str] = patch_padding
_lowercase : Union[str, Any] = is_training
_lowercase : str = use_labels
_lowercase : Optional[Any] = num_labels
_lowercase : str = num_channels
_lowercase : int = embed_dim
_lowercase : List[str] = num_heads
_lowercase : List[str] = stride_kv
_lowercase : List[Any] = depth
_lowercase : Dict = cls_token
_lowercase : Any = attention_drop_rate
_lowercase : Dict = initializer_range
_lowercase : List[str] = layer_norm_eps
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Union[str, Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
_lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowercase : int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] ):
"""simple docstring"""
_lowercase : int = TFCvtModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , training=_lowerCAmelCase )
_lowercase : Optional[Any] = (self.image_size, self.image_size)
_lowercase , _lowercase : Union[str, Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_lowercase : Optional[int] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowercase : Union[str, Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase_ ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : List[str] = TFCvtForImageClassification(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_lowercase : Optional[Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : Dict = config_and_inputs
_lowercase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
UpperCAmelCase_ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Tuple = TFCvtModelTester(self )
_lowercase : Any = TFCvtConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_lowercase : List[str] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_lowerCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Dict = model_class(_lowerCAmelCase )
_lowercase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Dict = [*signature.parameters.keys()]
_lowercase : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : int ):
_lowercase : Tuple = model_class(_lowerCAmelCase )
_lowercase : List[Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowercase : Dict = outputs.hidden_states
_lowercase : Dict = len(self.model_tester.depth )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Dict = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Optional[int] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Any = TFCvtModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
_lowercase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_lowercase : str = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowercase : Dict = self.default_image_processor
_lowercase : int = prepare_img()
_lowercase : Union[str, Any] = image_processor(images=_lowerCAmelCase , return_tensors='''tf''' )
# forward pass
_lowercase : int = model(**_lowerCAmelCase )
# verify the logits
_lowercase : Union[str, Any] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowercase : Optional[int] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCAmelCase , atol=1E-4 ) )
| 322 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
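
# The recurrence implemented above is the standard matrix-chain DP:
#   matrix[a][b] = min over c in [a, b) of
#       matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# For the demo dimensions [30, 35, 15, 5, 10, 20, 25] below, the optimum is
# 15125 scalar multiplications with parenthesization ((A1(A2A3))((A4A5)A6)).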
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 54 | 0 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
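
# Minimal usage sketch; the coefficients below are illustrative assumptions
# (roughly a 2nd-order Butterworth low-pass), not values from this module:
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.1430, 0.4128], [0.0675, 0.1349, 0.0675])
    print([round(filt.process(x), 4) for x in (1.0, 0.0, 0.0, 0.0)])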
| 353 |
from math import log2

def lowest_set_bit_index(number: int) -> int:
    """Return the 0-based index of the lowest set bit of ``number`` (0 for 0)."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase =tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__UpperCAmelCase =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__UpperCAmelCase =tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__UpperCAmelCase =tf_top_k_top_p_filtering(_lowerCAmelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__UpperCAmelCase =output[output != -float("""inf""" )]
__UpperCAmelCase =tf.cast(
tf.where(tf.not_equal(_lowerCAmelCase , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_lowerCAmelCase , _lowerCAmelCase , rtol=1e-12 )
tf.debugging.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
@require_tf
class _A ( unittest.TestCase , __lowercase ):
"""simple docstring"""
if is_tf_available():
lowerCamelCase : Optional[int] = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def _a ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__UpperCAmelCase =2
__UpperCAmelCase =2
class _A ( tf.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
super(_lowerCAmelCase , self ).__init__()
__UpperCAmelCase =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_lowerCAmelCase , )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
__UpperCAmelCase =self.model.generate(
input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase , max_new_tokens=_lowerCAmelCase , return_dict_in_generate=_lowerCAmelCase , )
return {"sequences": outputs["sequences"]}
__UpperCAmelCase =[[2, 0], [102, 103]]
__UpperCAmelCase =[[1, 0], [1, 1]]
__UpperCAmelCase =DummyModel(model=_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowerCAmelCase , _lowerCAmelCase , signatures={"""serving_default""": dummy_model.serving} )
__UpperCAmelCase =tf.saved_model.load(_lowerCAmelCase ).signatures["""serving_default"""]
for batch_size in range(1 , len(_lowerCAmelCase ) + 1 ):
__UpperCAmelCase ={
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__UpperCAmelCase =serving_func(**_lowerCAmelCase )["""sequences"""]
__UpperCAmelCase =test_model.generate(**_lowerCAmelCase , max_new_tokens=_lowerCAmelCase )
tf.debugging.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
@slow
def test_generate_tf_function_export_fixed_batch_size(self):
    # TF-only test: exports `generate` inside a tf.function with a fixed batch size
    test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    batch_size = 1
    max_new_tokens = 2

    class DummyModel(tf.Module):
        def __init__(self, model):
            super(DummyModel, self).__init__()
            self.model = model

        @tf.function(
            input_signature=(
                tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
            ),
            jit_compile=True,
        )
        def serving(self, input_ids, attention_mask):
            outputs = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=max_new_tokens,
                return_dict_in_generate=True,
            )
            return {"sequences": outputs["sequences"]}

    dummy_input_ids = [[2], [102, 103]]
    dummy_attention_masks = [[1], [1, 1]]
    dummy_model = DummyModel(model=test_model)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
        serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
        for input_row in range(len(dummy_input_ids)):
            inputs = {
                "input_ids": tf.constant([dummy_input_ids[input_row]]),
                "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
            }
            tf_func_outputs = serving_func(**inputs)["sequences"]
            tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
            tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

@slow
@require_tensorflow_text
def test_generate_tf_with_tf_tokenizer(self):
    with tempfile.TemporaryDirectory() as tmp_dir:
        # file needed to load the TF tokenizer
        hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

        class CompleteSentenceTransformer(tf.keras.layers.Layer):
            def __init__(self):
                super().__init__()
                self.tokenizer = text.SentencepieceTokenizer(
                    model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                )
                self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

            def call(self, inputs, *args, **kwargs):
                tokens = self.tokenizer.tokenize(inputs)
                input_ids, attention_mask = text.pad_model_inputs(
                    tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                )
                outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                return self.tokenizer.detokenize(outputs)

        complete_model = CompleteSentenceTransformer()
        inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
        outputs = complete_model(inputs)
        keras_model = tf.keras.Model(inputs, outputs)
        keras_model.save(tmp_dir)

def test_eos_token_id_int_and_list_top_k_top_sampling(self):
    # This test relies on random sampling, so generation happens with a fixed seed
    generation_kwargs = {
        "do_sample": True,
        "num_beams": 1,
        "top_p": 0.7,
        "top_k": 10,
        "temperature": 0.7,
    }
    expectation = 14

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    article = "Hello, my dog is cute and"
    tokens = tokenizer(article, return_tensors="tf")
    model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

    eos_token_id = 638
    # forces the generation to happen on CPU, to avoid GPU-related quirks
    with tf.device(":/CPU:0"):
        tf.random.set_seed(0)
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
    self.assertTrue(expectation == len(generated_tokens[0]))

    eos_token_id = [638, 198]
    with tf.device(":/CPU:0"):
        tf.random.set_seed(0)
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
    self.assertTrue(expectation == len(generated_tokens[0]))

def test_model_kwarg_encoder_signature_filtering(self):
    bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    article = "Hugging Face is a technology company based in New York and Paris."
    input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
    bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
    output = bart_model.generate(input_ids).numpy()

    # A fake model whose `call` accepts an extra "foo" argument: generation must filter it
    # out of the encoder kwargs, so the output matches the unmodified model
    class FakeBart(TFBartForConditionalGeneration):
        def call(self, input_ids, foo=None, **kwargs):
            return super().call(input_ids, **kwargs)

    bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
    fake_output = bart_model.generate(input_ids, foo="bar").numpy()
    self.assertTrue(np.array_equal(output, fake_output))

    class FakeEncoder(bart_model.model.encoder.__class__):
        def call(self, input_ids, **kwargs):
            return super().call(input_ids, **kwargs)

    fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
    bart_model.model.encoder = fake_encoder
    # Normal generation still works (the output will be different because the encoder weights are different)
    fake_output = bart_model.generate(input_ids).numpy()
    with self.assertRaises(ValueError):
        # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
        bart_model.generate(input_ids, foo="bar")
| 68 |
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq state dict and remap it to the OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q, K, V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the QKV weight separated in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
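# --- usage sketch (not part of the original script) ---
# The converter above splits the fused QKV projection with torch.split along dim 0,
# in K, V, Q order. A minimal standalone check of that logic; the hidden size and
# tensor below are made up for illustration only:
import torch

hidden_size = 4
fused_qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)

depth = fused_qkv.shape[0]
assert depth % 3 == 0
k, v, q = torch.split(fused_qkv, depth // 3, dim=0)  # note the K, V, Q order

assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)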
| 54 | 0 |
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
| 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a batch of torch images with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
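# --- usage sketch (not part of the original module) ---
# Assuming torch is available, a fake batch in [-1, 1] (the range diffusion models
# emit) round-trips through pt_to_pil into a list of PIL images:
import torch

fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # NCHW, values in [-1, 1]
pil_images = pt_to_pil(fake_batch)
print(len(pil_images), pil_images[0].size, pil_images[0].mode)  # 2 (64, 64) RGB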
| 54 | 0 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class DeiTImageProcessor(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])` with the given resampling filter."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale an image by a scale factor, e.g. `1 / 255` to bring pixel values into `[0, 1]`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess an image or a batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
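# --- usage sketch (not part of the original module) ---
# The class name above is reconstructed from the defaults (256x256 resize, 224x224
# center crop, ImageNet-standard normalization); any BaseImageProcessor subclass with
# these defaults behaves the same way:
import numpy as np

processor = DeiTImageProcessor()
dummy_image = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)
batch = processor(images=dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resized, cropped, channels-first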
| 604 |
def hamming(n_element: int) -> list:
    """
    Return the first n_element Hamming numbers, i.e. numbers of the form
    2^i * 3^j * 5^k, in increasing order starting from 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 54 | 0 |
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] ) -> None:
_UpperCamelCase : int = {} # Mapping from char to TrieNode
_UpperCamelCase : Any = False
def __SCREAMING_SNAKE_CASE ( self : int , __a : list[str] ) -> None:
for word in words:
self.insert(_lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str ) -> None:
_UpperCamelCase : Dict = self
for char in word:
if char not in curr.nodes:
_UpperCamelCase : int = TrieNode()
_UpperCamelCase : List[Any] = curr.nodes[char]
_UpperCamelCase : Tuple = True
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : str ) -> bool:
_UpperCamelCase : Optional[Any] = self
for char in word:
if char not in curr.nodes:
return False
_UpperCamelCase : str = curr.nodes[char]
return curr.is_leaf
def __SCREAMING_SNAKE_CASE ( self : str , __a : str ) -> None:
def _delete(__a : TrieNode , __a : str , __a : int ) -> bool:
if index == len(_lowerCAmelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_UpperCamelCase : str = False
return len(curr.nodes ) == 0
_UpperCamelCase : List[Any] = word[index]
_UpperCamelCase : List[str] = curr.nodes.get(_lowerCAmelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_UpperCamelCase : int = _delete(_lowerCAmelCase , _lowerCAmelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _lowerCAmelCase , 0 )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
if node.is_leaf:
print(lowercase__ ,end=" " )
for key, value in node.nodes.items():
print_words(lowercase__ ,word + key )
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = "banana bananas bandana band apple all beast".split()
_UpperCamelCase : Tuple = TrieNode()
root.insert_many(lowercase__ )
# print_words(root, "")
assert all(root.find(lowercase__ ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def lowercase__ ( lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
print(str(lowercase__ ) ,"works!" if passes else "doesn't work :(" )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
assert test_trie()
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
print_results("Testing trie functionality" ,test_trie() )
if __name__ == "__main__":
main()
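# --- usage sketch (not part of the original tests) ---
# A shorter interactive-style walk through the same API:
root = TrieNode()
root.insert_many(["car", "card", "care"])
print(root.find("car"))   # True
print(root.find("ca"))    # False -- "ca" is only a prefix, not an inserted word
root.delete("card")
print(root.find("card"))  # False; "car" and "care" survive the partial deletion
print_words(root, "")     # car care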
| 624 |
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 54 | 0 |
"""simple docstring"""
from math import ceil
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
lowercase = list(range(0 , lowercase__ ) )
lowercase = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowercase = []
for i in device_map_blocks:
if device_map_blocks.count(lowercase__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowercase__ )
# Missing blocks
lowercase = [i for i in blocks if i not in device_map_blocks]
lowercase = [i for i in device_map_blocks if i not in blocks]
if len(lowercase__ ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(lowercase__ ) )
if len(lowercase__ ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(lowercase__ ) )
if len(lowercase__ ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(lowercase__ ) )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase = list(range(lowercase__ ) )
lowercase = int(ceil(n_layers / len(lowercase__ ) ) )
lowercase = [layers[i : i + n_blocks] for i in range(0 , lowercase__ , lowercase__ )]
return dict(zip(lowercase__ , lowercase__ ) )
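# --- worked example (not part of the original module) ---
# Splitting 12 layers across 2 devices gives 6 consecutive layers per device,
# and the resulting map passes validation:
device_map = get_device_map(n_layers=12, devices=[0, 1])
print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # raises ValueError for bad maps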
| 359 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
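# --- usage sketch (not part of the original test file) ---
# CANINE is character-level, so input IDs are plain Unicode code points wrapped in
# the [CLS]/[SEP] markers seen in the expected encodings above (0xE000 and 0xE001):
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
encoding = tokenizer("hi")
print(encoding.input_ids)  # [57344, 104, 105, 57345] -> [CLS], ord("h"), ord("i"), [SEP]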
| 54 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
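# --- usage sketch (not part of the original builder) ---
# The `field` handled in _generate_tables maps to the `field` argument of
# load_dataset; the file path below is hypothetical:
from datasets import load_dataset

# data.json contains: {"version": "1.0", "data": [{"a": 1}, {"a": 2}]}
ds = load_dataset("json", data_files="data.json", field="data")
print(ds["train"][0])  # {'a': 1}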
| 196 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
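# --- usage sketch (not part of the original metric) ---
# The metric is a thin wrapper around NLTK, so the same number can be computed
# directly for a quick cross-check:
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
score = gleu_score.corpus_gleu(
    list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
)
print(round(score, 2))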
| 54 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if len(lowercase__ ) == 0:
return array
_lowerCAmelCase, _lowerCAmelCase : str = min(lowercase__ ), max(lowercase__ )
# Compute the variables
_lowerCAmelCase : str = _max - _min + 1
_lowerCAmelCase, _lowerCAmelCase : str = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
_lowerCAmelCase : Dict = i - _min
_lowerCAmelCase : Union[str, Any] = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
_lowerCAmelCase : Tuple = 0
for i in range(lowercase__ ):
while holes_repeat[i] > 0:
_lowerCAmelCase : Optional[Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = input("""Enter numbers separated by comma:\n""")
_lowerCAmelCase = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
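# --- quick checks (not part of the original script) ---
# Pigeonhole sort runs in O(n + range) time with O(range) extra space, so it only
# pays off when max - min is small relative to n. It also handles negatives:
print(pigeon_sort([8, 3, 2, 7, 4, 6, 8]))  # [2, 3, 4, 6, 7, 8, 8]
print(pigeon_sort([-3, 0, -1, 2]))         # [-3, -1, 0, 2]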
| 259 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def cross_attention_dim(self):
        return self.time_input_dim * 4

    @property
    def num_inference_steps(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 54 | 0 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line: str):
    '''simple docstring'''
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    '''simple docstring'''
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    '''simple docstring'''
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    '''simple docstring'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    '''simple docstring'''
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
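# A quick sketch of what find_backend extracts (input line invented for illustration):
#   find_backend("    if not is_torch_available():")  ->  "torch"
# Lines that do not look like a backend guard return None.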
| 429 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ) -> Union[str, Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self: Dict ) -> Any:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self: str ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self: Tuple ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self: Dict ) -> Dict:
        '''simple docstring'''
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained( self: Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 54 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device('cpu')
def prepare_img() -> Optional[int]:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ) -> Dict:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def rename_key( dct , old , new ) -> Any:
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict ) -> Dict:
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv" , ".point_wise_conv" )
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv" , ".depth_wise_conv" )
        if ".Proj." in k:
            k_new = k_new.replace(".Proj." , ".proj." )
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
        if "network" in k_new:
            ls = k_new.split("." )
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
            else:
                k_new = k_new.replace("network" , "swiftformer.encoder.network" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ) -> Optional[Any]:
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https" ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="cpu" , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location="cpu" )
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config" )
    inputs = processor(images=image , return_tensors="pt" )

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs["pixel_values"] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1E-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_snake_case = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
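# Example invocation (script name and checkpoint path are placeholders, not from the
# original file):
#   python convert_swiftformer.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth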
| 245 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    '''simple docstring'''
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
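# Worked example (values chosen for illustration, not from the original file):
# to find n with n % 5 == 1 and n % 7 == 3, both chinese_remainder_theorem(5, 1, 7, 3)
# and chinese_remainder_theorem2(5, 1, 7, 3) return 31, since 31 % 5 == 1 and 31 % 7 == 3.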
| 54 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            'Request to slack returned an error '
            F'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg_error)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
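# For reference, the call above is equivalent to this raw request (the URL is a
# placeholder, not a real webhook):
#   curl -X POST -H 'Content-Type: application/json' \
#        --data '{"text": "<YOUR MESSAGE BODY>"}' \
#        https://hooks.slack.com/services/T000/B000/XXXX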
| 322 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config" , type=str , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
    parser.add_argument(
        "--tokenizer" , type=str , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
    parser.add_argument(
        "--per_replica_batch_size" , type=int , default=8 , help="Batch size per TPU core." , )
    parser.add_argument(
        "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
    parser.add_argument(
        "--tpu_name" , type=str , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
    parser.add_argument(
        "--tpu_zone" , type=str , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
    parser.add_argument(
        "--gcp_project" , type=str , help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
    parser.add_argument(
        "--train_dataset" , type=str , help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--shuffle_buffer_size" , type=int , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
    parser.add_argument(
        "--eval_dataset" , type=str , help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of epochs to train for." , )
    parser.add_argument(
        "--learning_rate" , type=float , default=1E-4 , help="Learning rate to use for training." , )
    parser.add_argument(
        "--weight_decay_rate" , type=float , default=1E-3 , help="Weight decay rate to use for training." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
    parser.add_argument(
        "--mlm_probability" , type=float , default=0.15 , help="Fraction of tokens to mask during training." , )
    parser.add_argument("--output_dir" , type=str , required=True , help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id" , type=str , help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    '''simple docstring'''
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples(file_list):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
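# Illustrative shard name (invented for this note): count_samples expects files like
# "train-00000-1024.tfrecord", where the trailing number is the shard's sample count:
#   count_samples(["gs://bucket/train-00000-1024.tfrecord"])  ->  1024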
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    '''simple docstring'''
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main(args):
    '''simple docstring'''
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["accuracy"] )

    def decode_fn(sample ):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(sample , features )

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="tf" )

    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"] , tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"] , batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
    main(args)
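# Example invocation (paths, bucket, and script name are placeholders, not from the
# original file):
#   python run_mlm.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --output_dir ./mlm-checkpoints/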
| 54 | 0 |
"""simple docstring"""
def binomial_coefficient(n, r):
    """simple docstring"""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
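# Sanity check (standard identity, not from the original file): the table `c`
# accumulates Pascal's rule C(n, r) = C(n-1, r-1) + C(n-1, r), so C(10, 5) = 252
# and the print above outputs 252.
assert binomial_coefficient(10, 5) == 252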
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args: List[Any] , **kwargs: List[str] ) -> List[str]:
            '''simple docstring'''
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline( self: Optional[int] , model: List[str] , tokenizer: Optional[Any] , processor: Tuple ) -> Optional[int]:
        '''simple docstring'''
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test( self: List[Any] , vqa_pipeline: List[Any] , examples: str ) -> int:
        '''simple docstring'''
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{"score": ANY(float ), "answer": ANY(str )}],
                [{"score": ANY(float ), "answer": ANY(str )}],
            ] , )
@require_torch
    def test_small_model_pt( self: Tuple ) -> str:
        '''simple docstring'''
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question="How many cats are there?" , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
@slow
@require_torch
    def test_large_model_pt( self: List[str] ) -> List[str]:
        '''simple docstring'''
        vqa_pipeline = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
    def test_small_model_tf( self: int ) -> List[str]:
'''simple docstring'''
pass
| 54 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape: Tuple, scale: Optional[Any] = 1.0, rng: str = None, name: Tuple = None) -> Any:
    """creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self : Union[str, Any] ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self : List[Any] , equal_length : List[str]=False , numpify : Dict=False ) -> str:
        def _flatten(list_of_lists : List[str] ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp( self : Union[str, Any] ) -> Optional[int]:
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self : Union[str, Any] ) -> Tuple:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self : Union[str, Any] ) -> Dict:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self : List[str] ) -> Optional[Any]:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding="""max_length""" , return_tensors="""np""" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_double_precision_pad( self : str ) -> Tuple:
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self : int , num_samples : Optional[int] ) -> List[Any]:
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self : str ) -> Tuple:
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
                0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
                0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
                -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self : str ) -> Any:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
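# Quick numeric check of the zero-mean/unit-variance normalization tested above
# (values invented for illustration): for x = [0, 2, 4], mean = 2 and the population
# std is sqrt(8/3) ~= 1.633, so the normalized values are roughly [-1.22, 0.0, 1.22].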
| 68 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    '''simple docstring'''
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ==
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
__lowercase : List[str] =[int(x) for x in input("""Input profits separated by spaces: """).split()]
__lowercase : Union[str, Any] =[int(x) for x in input("""Input weights separated by spaces: """).split()]
__lowercase : Tuple =int(input("""Max weight allowed: """))
# Function Call
print(calc_profit(profit, weight, max_weight))
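# Worked example (numbers invented for illustration): profits [10, 9, 8] with
# weights [2, 3, 4] and max_weight 5 take the whole w=2 item (profit 10) and the
# whole w=3 item (profit 9), so calc_profit([10, 9, 8], [2, 3, 4], 5) returns 19.0.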
| 54 | 0 |
class Graph:  # Public class to implement a graph
    def __init__( self , row , col , graph ):
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe( self , i , j , visited ):
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs( self , i , j , visited ):
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )

    def count_islands( self ):  # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
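# Minimal usage sketch (grid invented for illustration, not from the original file):
# each group of 8-connected 1s counts as one island.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    g = Graph(len(grid), len(grid[0]), grid)
    print(g.count_islands())  # expected output: 5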
| 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
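# Illustrative behavior note (not from the original file): once the lazy module is
# installed in sys.modules, a statement like
#   from transformers.models.blip import BlipProcessor
# only imports processing_blip at the moment the attribute is first resolved.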
| 54 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : List[str] ) -> Any:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : int ) -> Union[str, Any]:
        '''simple docstring'''
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        '''simple docstring'''
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Tuple:
        '''simple docstring'''
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , ) -> Tuple:
'''simple docstring'''
lowercase = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Any , ) -> str:
'''simple docstring'''
lowercase = True
lowercase = True
lowercase = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['''hidden_states'''][0]
lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['''hidden_states'''][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
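
# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the test suite).
# The cache test above follows a generic pattern: run the decoder once over
# the full sequence, then incrementally with `past_key_values`, and check
# that the overlapping hidden states agree. A standalone toy version,
# assuming only that `transformers` and `torch` are installed; the tiny
# config values are arbitrary, not those of any released checkpoint:
def _demo_kv_cache_equivalence():
    import torch
    from transformers import LlamaConfig, LlamaModel

    torch.manual_seed(0)
    config = LlamaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64
    )
    model = LlamaModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    next_ids = torch.randint(0, config.vocab_size, (1, 3))

    with torch.no_grad():
        # One pass over the concatenated sequence.
        full = model(torch.cat([input_ids, next_ids], dim=-1)).last_hidden_state
        # Two passes, reusing the key/value cache from the first.
        past = model(input_ids, use_cache=True).past_key_values
        cached = model(next_ids, past_key_values=past).last_hidden_state

    # The final three positions of the full pass must match the cached pass.
    assert torch.allclose(full[:, -3:], cached, atol=1e-3)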
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
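
# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of this script). The
# heart of `save_len_file` is counting non-pad tokens per row; the same
# computation without the dataset plumbing, using any HF tokenizer name (the
# one below is a stand-in):
#
#     from transformers import AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("t5-small")
#     batch = tok(["a short line", "a somewhat longer line of text"],
#                 padding=True, return_tensors="pt")
#     lens = batch["input_ids"].ne(tok.pad_token_id).sum(1).tolist()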
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")

        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
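
# ---------------------------------------------------------------------------
# Illustrative usage sketch (an editorial addition, not part of this module).
# In a terminal, the menu is driven like this; `run()` blocks until Enter is
# pressed and returns the index of the selected choice:
#
#     menu = BulletMenu("Pick a compute backend:", ["cpu", "cuda", "mps"])
#     index = menu.run(default_choice=0)
#     print("selected:", menu.choices[index])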
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
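
# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the test suite).
# The `seq_length` used throughout the tester is simply the patch count plus
# one [CLS] token; a standalone check of that arithmetic with the tester's
# toy defaults:
def _demo_vit_seq_length(image_size=30, patch_size=2):
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # 226 for the defaults above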
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
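
# Illustrative sanity check (an editorial addition): with the default cosine
# transform the betas stay below `max_beta` and the schedule has one entry
# per diffusion step, e.g.:
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,) and float(betas.max()) <= 0.999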
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler with Heun steps for discrete beta schedules.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""

        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
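
# ---------------------------------------------------------------------------
# Illustrative usage sketch (an editorial addition, not part of this module).
# Schedulers in this family are driven through `set_timesteps`,
# `scale_model_input`, and `step`; a real pipeline would call a UNet where
# the zero tensor stands in for the model below:
def _demo_scheduler_loop():
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a model call
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample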
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` is present in the sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
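

# Illustrative variant (an editorial addition): the slicing above copies the
# list on every call, so the recursion costs O(n) extra work per level. An
# iterative two-pointer version runs in O(log n) time and O(1) space:
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False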
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
__lowercase : Optional[Any] =[int(item.strip()) for item in user_input.split(""",""")]
__lowercase : List[Any] =int(input("""Enter the number to be found in the list:\n""").strip())
__lowercase : Optional[Any] ="""""" if binary_search(sequence, target) else """not """
print(f"""{target} was {not_str}found in {sequence}""")
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
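
# Illustrative note (an editorial addition): TensorFlow "SAME" padding pads
# so that ceil(in / stride) output positions are produced. For a 7x7 input
# with stride 2 and kernel 3: pad_along = max(3 - (7 % 2), 0) = 2, split as
# (1, 1), which yields a 4x4 output, matching ceil(7 / 2) = 4.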
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
            padding=padding, groups=groups, bias=bias, padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997,
                affine=True, track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels,
                    kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states
        )
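
# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the modeling code).
# The channel widths of the 13 depthwise-separable blocks follow the stride
# pattern above: depth doubles whenever a stride-2 block (or the first block)
# is reached, scaled by `depth_multiplier` and floored at `min_depth`:
def _demo_channel_progression(depth_multiplier=1.0, min_depth=8):
    depth = 32
    strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
    widths = []
    for i in range(13):
        if strides[i] == 2 or i == 0:
            depth *= 2
        widths.append(max(int(depth * depth_multiplier), min_depth))
    return widths  # [64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]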
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (  # (hand, expected _is_flush() result)
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (  # (hand, expected _is_straight() result)
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (  # (hand, expected result, expected card values)
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (  # (hand, expected _is_same_kind() score)
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (  # (hand, _hand_type score: 14 = high card up to 23 = royal flush)
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and compute the expected result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # SORTED_HANDS is ordered weakest-to-strongest, so comparing the two indices
    # gives the outcome directly: 0 -> "Loss", 1 -> "Tie", 2 -> "Win".
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield ``number_of_hands`` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # A five-high straight (A 2 3 4 5) must sort below a six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # _is_five_high_straight() must not mutate state across repeated calls.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Project Euler problem 54: count how many of the 1000 hands player 1 wins.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
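
# The index trick in ``generate_random_hand`` above maps two booleans onto the
# three outcomes; a quick standalone check (index values are illustrative):
#
#   play, oppo = 5, 3
#   ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]  # -> "Win" (1 + 1 = 2)
#   play, oppo = 3, 3
#   ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]  # -> "Tie" (1 + 0 = 1)
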
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    """Load TF2.x weights from an official BERT checkpoint into a PyTorch BertModel."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {full_name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
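
    # Example invocation (script name and paths are illustrative):
    #
    #   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./tf2_bert/ckpt-1 \
    #       --bert_config_file ./tf2_bert/bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin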
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """Image processor using the ConvNeXT-style strategy: crop below 384 px,
    plain resize at 384 px and above."""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # parentheses: both size and resample are required when resizing
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
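
# A minimal usage sketch (the Pillow image path and output shape are
# illustrative; __call__ forwards to preprocess):
#
#   from PIL import Image
#   processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#   inputs = processor(images=Image.open("cat.png"), return_tensors="np")
#   inputs["pixel_values"].shape   # -> (1, 3, 224, 224)
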
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
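
# With this pattern, importing the package stays cheap: torch-dependent classes
# are only resolved on first attribute access. A usage sketch (package name
# assumed to be `transformers`):
#
#   from transformers import TimeSeriesTransformerConfig
#   config = TimeSeriesTransformerConfig(prediction_length=24)
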
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
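
# For example, on a pytest summary such as "3 failed, 120 passed in 0:02:15"
# (illustrative input) the parser above returns:
#
#   handle_test_results("3 failed, 120 passed in 0:02:15")
#   # -> (3, 120, "0:02:15")
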
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        # e.g. "1:02:05" -> "1h2m5s"; a bare seconds value like "45.3" -> "0h0m45s"
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # note: reads the module-level doc_test_results built in __main__
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))  # payload is already a Python list here

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            # Slack caps block size, so truncate very long error messages.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    # key each report by its file stem, e.g. "stats" for "stats.txt"
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
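
# A sketch of the returned mapping (file names inside the artifact directory
# are illustrative):
#
#   retrieve_artifact("doc_tests_gpu_test_reports")
#   # -> {"stats": "...", "summary_short": "...", "failures_short": "..."}
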
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
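
    # After the loop above, `doc_test_results` has roughly this shape
    # (values are illustrative):
    #
    #   {
    #       "job_link": "https://github.com/huggingface/transformers/actions/runs/...",
    #       "failures": 3, "success": 120, "time_spent": "0:02:15, ",
    #       "API Examples": {"failed": ["test_a"], "failures": {"test_a": "ValueError: ..."}},
    #       "MD Examples": {"failed": [], "failures": {}},
    #   }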
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    """Configuration class for an XLM-ProphetNet model."""

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from standard British coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
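
# A quick sanity check on a small target: with the coins {1, 2, 5} usable up to
# 5 pence there are four combinations (5; 1+2+2; 1+1+1+2; 1+1+1+1+1), so:
#
#   solution(5)   # -> 4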