code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
# Route transformers' logging to warning level for this conversion script.
logging.set_verbosity_warning()

# JSON indent width used when dumping the generated config/vocab files.
# NOTE(review): dropped the original `Optional[int]` annotation — `typing.Optional`
# is never imported in this file, and a *module-level* annotated assignment
# evaluates its annotation at import time, so the script died with NameError.
_snake_case = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style dictionary).

    NOTE(review): reconstructed from an obfuscated copy in which the class was
    named `a`, all parameters were named `lowerCamelCase` (duplicate parameter
    names are a SyntaxError) and attribute assignments targeted a throwaway
    `__snake_case` name.  The conversion function below already calls
    `Dictionary.load(...)`, which fixes the intended class name.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # index -> symbol
        self.count = []  # index -> accumulated count
        self.indices = {}  # symbol -> index
        # The four specials always occupy indices 0..3 in this order.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        # Equality only compares the symbol->index mapping, not the counts.
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range lookups fall back to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a text file (path or file object) with the
        format `<symbol> <count>` per line and return a new instance."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add `word` to the dictionary and return its index.

        Re-adding an existing word bumps its count by `n` unless `overwrite`
        is set, in which case the word is appended as a fresh entry.
        """
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # Plain dict format has no metadata header: data starts at line 0.
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file (path or file
        object) and add its symbols to this instance.

        Raises RuntimeError on duplicate words without the
        `#fairseq:overwrite` flag, ValueError on malformed lines.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                # Split the trailing count (and optional flag) off the symbol.
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """Rewrite a fairseq BPE vocab dict into HF-tokenizer form.

    (1) remove the word-breaking symbol `@@` from continuation pieces,
    (2) add the word-ending symbol `</w>` where the word is not broken up,
    e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}

    The special tokens <s>/<pad>/</s>/<unk> are restored untouched; `d` is
    therefore expected to contain all four (KeyError otherwise).
    """
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT dump dir (checkpoint.pt, dict.txt, bpecodes)
    into a HF Transformers BioGPT checkpoint under `pytorch_dump_folder_path`.

    NOTE(review): reconstructed the local names from an obfuscated copy in
    which every variable was `__snake_case`/`__lowerCamelCase`; the f-strings
    and the `Dictionary.load` / `rewrite_dict_keys` call sites already used
    these names, so this restores a runnable script.
    """
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        # indent=2 mirrors the module-level indent constant
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # Rename fairseq decoder.* keys into HF biogpt.* keys.
    # NOTE(review): the rename targets were destroyed by the obfuscation; this
    # mapping follows the upstream conversion script — confirm against it.
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            # the LM head weight lives at the top level of the HF model
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    # NOTE(review): the obfuscated copy assigned the parser and parsed args to
    # `_snake_case` (with an unevaluable `Any` annotation) while the body used
    # `parser`/`args` — restored the names the code actually references.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 81 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
# NOTE(review): this block appears to be a machine-obfuscated LayoutLMv3
# model-tester helper class (cf. `LayoutLMvaModelTester(self)` at the setUp
# below): every parameter was renamed to `A` and every attribute assignment
# targets the throwaway name `__A` instead of `self.<attr>`, so attributes
# such as `self.batch_size` read later are never actually set.  Indentation
# was also stripped.  The comments below document the apparent intent; the
# code itself is left byte-identical for traceability.
class UpperCAmelCase :
'''simple docstring'''
# Apparent intent: store the tester hyper-parameters (batch size, image/patch
# sizes, text sequence length, model dims, label counts, bbox range) on self.
def __init__( self : str ,A : int ,A : int=2 ,A : Optional[Any]=3 ,A : Dict=4 ,A : Optional[int]=2 ,A : Union[str, Any]=7 ,A : List[str]=True ,A : Union[str, Any]=True ,A : Optional[int]=True ,A : Optional[int]=True ,A : Tuple=99 ,A : Optional[int]=36 ,A : Dict=3 ,A : str=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : Dict=0.1 ,A : Union[str, Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Any=16 ,A : Union[str, Any]=2 ,A : List[Any]=0.02 ,A : List[Any]=6 ,A : Optional[int]=6 ,A : List[Any]=3 ,A : Union[str, Any]=4 ,A : Tuple=None ,A : List[str]=10_00 ,):
# Presumably: parent, batch_size, num_channels, image_size, patch_size,
# text_seq_length, is_training, use_input_mask, use_token_type_ids,
# use_labels, vocab_size, hidden_size, ... in parameter order — TODO confirm.
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = patch_size
__A = text_seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = coordinate_size
__A = shape_size
__A = num_labels
__A = num_choices
__A = scope
__A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__A = text_seq_length
__A = (image_size // patch_size) ** 2 + 1
__A = self.text_seq_length + self.image_seq_length
# Apparent intent (prepare_config_and_inputs): build a LayoutLMvaConfig plus
# random input_ids, legal bboxes, pixel values, optional masks and labels.
def UpperCamelCase_ ( self : int ):
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
__A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
# (swap coordinates so x0 <= x1 and y0 <= y1 for every box)
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__A = bbox[i, j, 3]
__A = bbox[i, j, 1]
__A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__A = bbox[i, j, 2]
__A = bbox[i, j, 0]
__A = t
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.text_seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
__A = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
# Apparent intent (create_and_check_model): run the base model with
# text+image, text-only and image-only inputs and check output shapes.
def UpperCamelCase_ ( self : Optional[int] ,A : List[str] ,A : Any ,A : Dict ,A : List[Any] ,A : Optional[int] ,A : Any ,A : Dict ,A : List[Any] ):
__A = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
__A = model(A ,pixel_values=A )
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__A = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
# Apparent intent: sequence-classification head check — logits (batch, num_labels).
def UpperCamelCase_ ( self : Optional[int] ,A : Dict ,A : List[str] ,A : Any ,A : List[Any] ,A : Any ,A : Any ,A : Dict ,A : Optional[Any] ):
__A = self.num_labels
__A = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
# Apparent intent: token-classification head check — logits (batch, text_seq_length, num_labels).
def UpperCamelCase_ ( self : str ,A : Optional[Any] ,A : Dict ,A : str ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ,A : Any ,A : Union[str, Any] ):
__A = self.num_labels
__A = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
# Apparent intent: question-answering head check — start/end logits (batch, seq_length).
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : int ,A : str ,A : List[str] ,A : int ,A : List[str] ,A : List[str] ,A : Dict ):
__A = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
# Apparent intent (prepare_config_and_inputs_for_common): unpack the tuple
# from prepare_config_and_inputs and return (config, inputs_dict).
def UpperCamelCase_ ( self : str ):
__A = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) = config_and_inputs
__A = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
# NOTE(review): obfuscated LayoutLMv3 ModelTesterMixin/PipelineTesterMixin
# test class — the base names were replaced by the undefined
# `__SCREAMING_SNAKE_CASE`, all class attributes were renamed to the same
# `snake_case_` (so they overwrite each other), and `__A` assignments never
# set `self.model_tester` / `self.config_tester`.  Code left byte-identical.
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
# Presumably three boolean test switches (e.g. fx_compatible / test_pruning /
# test_torchscript) — original attribute names lost, TODO confirm upstream.
snake_case_ = False
snake_case_ = False
snake_case_ = False
# Presumably `all_model_classes`.
snake_case_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
# Presumably `pipeline_model_mapping`.
snake_case_ = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
# Apparent intent: pipeline-skip predicate — always skip (see comment below).
def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Tuple ,A : List[Any] ,A : Optional[Any] ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
# Apparent intent (setUp): build the model tester and config tester.
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = LayoutLMvaModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
# Apparent intent (_prepare_for_class): expand inputs for multiple-choice
# models and synthesize labels per task mapping when return_labels is set.
def UpperCamelCase_ ( self : List[Any] ,A : int ,A : List[str] ,A : Dict=False ):
__A = copy.deepcopy(A )
if model_class in get_values(A ):
__A = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(A ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
__A = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in get_values(A ):
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A ,)
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
# Apparent intent: re-run the model check for each position-embedding type.
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
# Apparent intent: smoke-test loading the first pretrained checkpoint.
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = LayoutLMvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    """Load the standard COCO cats fixture image used by the slow
    integration test below.

    NOTE(review): renamed from the obfuscated `UpperCAmelCase` (which also
    collided with the class names) — the integration test calls
    `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
# NOTE(review): obfuscated LayoutLMv3 slow integration test.  `__A`
# assignments never bind the names used later (`input_ids`, `bbox`,
# `pixel_values`, `outputs`), and several call arguments were replaced by the
# undefined bare `A` (e.g. `apply_ocr=A`, the `.to(A)` device argument —
# presumably `apply_ocr=False` / `torch_device`, TODO confirm upstream).
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
# Apparent intent: lazily build the image processor (None without vision deps).
@cached_property
def UpperCamelCase_ ( self : Any ):
return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
# Apparent intent: forward pass on the pretrained base model and comparison
# of the first 3x3 slice of the last hidden state against golden values.
@slow
def UpperCamelCase_ ( self : Dict ):
__A = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(A )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).pixel_values.to(A )
__A = torch.tensor([[1, 2]] )
__A = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__A = model(
input_ids=input_ids.to(A ) ,bbox=bbox.to(A ) ,pixel_values=pixel_values.to(A ) ,)
# verify the logits
# expected sequence length 199 = 2 text tokens + 196 patches + 1 CLS — presumably; TODO confirm
__A = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape ,A )
__A = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """Result of `bwt_transform`: the transformed string plus the index of
    the original string among the sorted rotations (needed to invert).

    NOTE(review): reconstructed — the obfuscated copy inherited from the
    undefined name `SCREAMING_SNAKE_CASE` and collapsed both field
    annotations to `UpperCamelCase = 42`; `TypedDict` is imported at the top
    of this file and matches the dict literal `bwt_transform` returns.
    """

    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of `s`, in rotation order.

    >>> all_rotations("abc")
    ['abc', 'bca', 'cab']

    Raises TypeError if `s` is not a str.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s):
    """Compute the Burrows-Wheeler transform of `s`.

    Returns a dict with the transformed string (`bwt_string`, the last
    character of each sorted rotation) and `idx_original_string`, the index
    of `s` among the sorted rotations, which is required to invert the
    transform.  Raises TypeError for non-str input and ValueError for an
    empty string.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string, idx_original_string):
    """Invert a Burrows-Wheeler transform.

    Rebuilds the sorted-rotations table column by column (O(n^2) in time and
    space) and returns the rotation at `idx_original_string`, i.e. the
    original string.  `idx_original_string` may be anything castable to int.

    Raises TypeError for a non-str `bwt_string` or a non-int-castable index,
    ValueError for an empty string or an out-of-range index.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # Prepend the BWT column and re-sort: after n passes the table holds
        # all original rotations in sorted order.
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # NOTE(review): restored the variable names (`entry_msg`, `s`, `result`,
    # `original_string`) that the f-strings below already referenced; the
    # obfuscated copy reassigned everything to `lowerCamelCase`.
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 82 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters for the BeitImageProcessor tests and builds
    the kwargs dict the processor under test is constructed from.

    NOTE(review): reconstructed from an obfuscated copy in which the class
    was named `UpperCAmelCase`, all parameters were named `A` and every
    attribute assignment targeted a throwaway `__A`; the test class below
    instantiates `BeitImageProcessingTester(self)`, fixing the intended name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # treated as read-only; never mutated
        image_std=[0.5, 0.5, 0.5],  # treated as read-only; never mutated
        do_reduce_labels=False,
    ):
        # Fall back to the default resize/crop geometries when not supplied.
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a BeitImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Return one (image, segmentation_map) PIL pair from the ADE20k fixture
    dataset (requires dataset access).

    NOTE(review): renamed from the obfuscated `UpperCAmelCase` — the
    segmentation tests below call `prepare_semantic_single_inputs()`.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    segmentation_map = Image.open(dataset[1]["file"])
    return image, segmentation_map
def prepare_semantic_batch_inputs():
    """Return a batch of two (image, segmentation_map) PIL pairs from the
    ADE20k fixture dataset (requires dataset access).

    NOTE(review): renamed from the obfuscated `UpperCAmelCase` — the
    segmentation tests below call `prepare_semantic_batch_inputs()`.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = BeitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : List[Any] ):
__A = BeitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : int ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize" ) )
self.assertTrue(hasattr(A ,"size" ) )
self.assertTrue(hasattr(A ,"do_center_crop" ) )
self.assertTrue(hasattr(A ,"center_crop" ) )
self.assertTrue(hasattr(A ,"do_normalize" ) )
self.assertTrue(hasattr(A ,"image_mean" ) )
self.assertTrue(hasattr(A ,"image_std" ) )
def UpperCamelCase_ ( self : List[str] ):
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels ,A )
__A = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=A )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels ,A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[str] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : str ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
__A = []
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test not batched input (PIL images)
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched input (PIL images)
__A , __A = prepare_semantic_batch_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
    def UpperCamelCase_ ( self : Dict ):
        """Checks that segmentation labels stay in the expected ranges.

        NOTE(review): the obfuscation collapsed distinct assignments into
        ``__A`` while later lines still read ``image_processing``/``encoding``
        and pass an undefined ``A`` — as written this raises NameError.
        Intended bindings are noted inline.
        """
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )  # intended: image_processing = ...
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        __A , __A = prepare_semantic_single_inputs()  # intended: image, segmentation_map = ...
        __A = image_processing(A ,A ,return_tensors="pt" )  # intended: encoding = image_processing(image, segmentation_map, ...)
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 1_50 )
        __A = True  # presumably enables label reduction on the processor — TODO confirm original attribute
        __A = image_processing(A ,A ,return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 2_55 )
| 55 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case :
    """Pairs a question-encoder tokenizer with a generator tokenizer (RAG).

    Restores the obfuscated original, whose methods declared duplicate
    parameter names (``__lowerCAmelCase`` repeated — a SyntaxError) and bound
    values to locals instead of ``self``.  Canonical names are taken from the
    reads left in the bodies (``question_encoder``, ``save_directory``,
    ``model_inputs``, ...).

    NOTE(review): the obfuscation also collapsed every method name except
    ``__init__``/``__call__`` to ``SCREAMING_SNAKE_CASE``; the original
    transformers names are noted on each method.  Because they share one
    name, later definitions shadow earlier ones on the class.
    """

    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        # ``__call__`` delegates to whichever tokenizer is "current".
        self.current_tokenizer = self.question_encoder

    def SCREAMING_SNAKE_CASE ( self , save_directory ):
        # originally ``save_pretrained``: save both tokenizers in subfolders.
        if os.path.isfile(save_directory ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''' )
        generator_path = os.path.join(save_directory , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , pretrained_model_name_or_path , **kwargs ):
        # originally ``from_pretrained``: load both sub-tokenizers from a RAG
        # checkpoint, deriving their configs from the RagConfig if not given.
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('''config''' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=question_encoder , generator=generator )

    def __call__( self , *args , **kwargs ):
        # Delegate to the currently selected tokenizer.
        return self.current_tokenizer(*args , **kwargs )

    def SCREAMING_SNAKE_CASE ( self , *args , **kwargs ):
        # originally ``batch_decode``
        return self.generator.batch_decode(*args , **kwargs )

    def SCREAMING_SNAKE_CASE ( self , *args , **kwargs ):
        # originally ``decode``
        return self.generator.decode(*args , **kwargs )

    def SCREAMING_SNAKE_CASE ( self ):
        # originally the input-mode switch: route ``__call__`` to the question encoder.
        self.current_tokenizer = self.question_encoder

    def SCREAMING_SNAKE_CASE ( self ):
        # originally the target-mode switch: route ``__call__`` to the generator.
        self.current_tokenizer = self.generator

    def SCREAMING_SNAKE_CASE ( self , src_texts : List[str] , tgt_texts : Optional[List[str]] = None , max_length : Optional[int] = None , max_target_length : Optional[int] = None , padding : str = "longest" , return_tensors : str = None , truncation : bool = True , **kwargs , ):
        # originally ``prepare_seq2seq_batch`` (deprecated API).
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
| 83 |
from numpy import exp, pi, sqrt
def UpperCAmelCase ( x , mu = 0.0 , sigma = 1.0 ) -> float:
    """Evaluate the normal (Gaussian) probability density at ``x``.

    Fixes the obfuscated original, which declared all three parameters as
    ``a_`` (a SyntaxError: duplicate argument names) while the body read
    ``x``/``mu``/``sigma``, and annotated the float result as ``-> int``.

    :param x: point at which to evaluate the density
    :param mu: mean of the distribution (default 0.0)
    :param sigma: standard deviation (default 1.0)
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 55 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class A_ ( PretrainedConfig ):
    """Configuration class for NLLB-MoE models (e.g. ``facebook/nllb-moe-54b``).

    Restores the obfuscated original, whose ``__init__`` declared every
    parameter as ``snake_case`` (a SyntaxError: duplicate argument names),
    assigned each value to a local ``lowercase`` instead of ``self``, named
    all three class attributes ``_UpperCamelCase`` (so they shadowed each
    other), and inherited from the undefined name ``__lowerCamelCase``.
    Parameter names, order and defaults match the published NllbMoeConfig.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=12_8112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 84 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test for FlaxStableDiffusionInpaintPipeline.

    NOTE(review): the obfuscation collapsed assignments into ``__A`` while
    later lines read the original names (``pipeline``, ``prompt``,
    ``num_samples``, ``output``, ...) and pass an undefined ``A`` — as
    written this raises NameError.  Intended names are noted inline.
    """
    def UpperCamelCase_ ( self : Tuple ):
        # originally ``tearDown``
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def UpperCamelCase_ ( self : Optional[int] ):
        """End-to-end sharded inpainting run; compares a pixel slice to a reference."""
        __A = load_image(  # intended: init_image
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __A = load_image(  # intended: mask_image
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __A = "xvjiarui/stable-diffusion-2-inpainting"  # intended: model_id
        __A , __A = FlaxStableDiffusionInpaintPipeline.from_pretrained(A ,safety_checker=A )  # intended: pipeline, params = from_pretrained(model_id, safety_checker=None)
        __A = "Face of a yellow cat, high resolution, sitting on a park bench"  # intended: prompt
        __A = jax.random.PRNGKey(0 )  # intended: prng_seed
        __A = 50  # intended: num_inference_steps
        __A = jax.device_count()  # intended: num_samples
        __A = num_samples * [prompt]
        __A = num_samples * [init_image]
        __A = num_samples * [mask_image]
        __A , __A , __A = pipeline.prepare_inputs(A ,A ,A )  # intended: prompt_ids, processed_masked_images, processed_masks
        # shard inputs and rng
        __A = replicate(A )  # intended: params = replicate(params)
        __A = jax.random.split(A ,jax.device_count() )  # intended: prng_seed
        __A = shard(A )
        __A = shard(A )
        __A = shard(A )
        __A = pipeline(  # intended: output
            A ,A ,A ,A ,A ,A ,jit=A )
        __A = output.images.reshape(A ,5_12 ,5_12 ,3 )  # intended: images, reshaped to (num_samples, 512, 512, 3)
        __A = images[0, 2_53:2_56, 2_53:2_56, -1]  # intended: image_slice
        __A = jnp.asarray(jax.device_get(image_slice.flatten() ) )  # intended: output_slice
        __A = jnp.array(  # intended: expected_slice
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 55 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class snake_case ( unittest.TestCase ):
    """Tests for transformers' centralized logging-verbosity controls.

    NOTE(review): all five test methods were obfuscated to the same name
    ``__lowercase`` (later defs shadow earlier ones), and local assignments
    were renamed to ``SCREAMING_SNAKE_CASE__`` while later lines still read
    the original names (``logger``, ``level_origin``, ``msg``, ``a_``, ...),
    so the methods raise NameError as written.  Intended bindings inline.
    """
    def __lowercase( self : Dict )-> Tuple:
        """set_verbosity_* must keep the root library logger's effective level in sync."""
        SCREAMING_SNAKE_CASE__ : str = logging.get_logger()  # intended: logger
        # the current default level is logging.WARNING
        SCREAMING_SNAKE_CASE__ : Tuple = logging.get_verbosity()  # intended: level_origin
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(a_ )  # intended: logging.set_verbosity(level_origin)
    def __lowercase( self : Optional[int] )-> Union[str, Any]:
        """set_verbosity_error() must silence logger.warning; warning level re-enables it."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_verbosity()  # intended: level_origin
        SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger('transformers.models.bart.tokenization_bart' )  # intended: logger
        SCREAMING_SNAKE_CASE__ : Any = 'Testing 1, 2, 3'  # intended: msg
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(a_ ) as cl:  # intended: CaptureLogger(logger)
                logger.warning(a_ )  # intended: logger.warning(msg)
            self.assertEqual(cl.out , msg + '\n' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(a_ ) as cl:
            logger.warning(a_ )
        self.assertEqual(cl.out , '' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(a_ ) as cl:
            logger.warning(a_ )
        self.assertEqual(cl.out , msg + '\n' )
        # restore to the original level
        logging.set_verbosity(a_ )  # intended: logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY='error' )
    def __lowercase( self : List[Any] )-> Union[str, Any]:
        """The TRANSFORMERS_VERBOSITY env var must drive the internal verbosity."""
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        SCREAMING_SNAKE_CASE__ : int = logging.get_logger('transformers.models.bart.tokenization_bart' )  # intended: logger
        SCREAMING_SNAKE_CASE__ : int = os.getenv('TRANSFORMERS_VERBOSITY' , a_ )  # intended: env_level_str = os.getenv(..., None)
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.log_levels[env_level_str]  # intended: env_level
        SCREAMING_SNAKE_CASE__ : str = logging.get_verbosity()  # intended: current_level
        self.assertEqual(
            a_ , a_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )  # intended: assertEqual(env_level, current_level, ...)
        # restore to the original level
        SCREAMING_SNAKE_CASE__ : List[Any] = ''  # presumably clears TRANSFORMERS_VERBOSITY in os.environ — TODO confirm
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def __lowercase( self : Any )-> Union[str, Any]:
        """An unknown TRANSFORMERS_VERBOSITY value must emit a warning."""
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        SCREAMING_SNAKE_CASE__ : int = logging.logging.getLogger()  # intended: logger (stdlib root logger)
        with CaptureLogger(a_ ) as cl:  # intended: CaptureLogger(logger)
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart' )
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
        # no need to restore as nothing was changed
    def __lowercase( self : int )-> Tuple:
        """TRANSFORMERS_NO_ADVISORY_WARNINGS must gate logger.warning_advice()."""
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger('transformers.models.bart.tokenization_bart' )  # intended: logger
        SCREAMING_SNAKE_CASE__ : Dict = 'Testing 1, 2, 3'  # intended: msg
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(a_ ) as cl:
                logger.warning_advice(a_ )
            self.assertEqual(cl.out , '' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(a_ ) as cl:
                logger.warning_advice(a_ )
            self.assertEqual(cl.out , msg + '\n' )
def _a ( ):
    """Smoke-test the huggingface_hub progress-bar toggle: disabling must be
    observable via are_progress_bars_disabled(), and re-enabling must undo it."""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 85 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
    """Holds the image-processor hyper-parameters used by the GLPN tests.

    Fixes the obfuscated original, whose ``__init__`` declared every
    parameter as ``A`` (a SyntaxError: duplicate argument names) and bound
    values to a local ``__A`` instead of ``self``.  Canonical names are
    recovered from the ``self.*`` reads in the dict builder below.
    """

    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,image_size=18 ,min_resolution=30 ,max_resolution=4_00 ,do_resize=True ,size_divisor=32 ,do_rescale=True ,):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to construct a GLPNImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }

    # Backward-compatible alias: the obfuscated original exposed this method
    # as ``UpperCamelCase_`` while the test class below calls
    # ``prepare_image_processor_dict``.
    UpperCamelCase_ = prepare_image_processor_dict


# The test class below instantiates ``GLPNImageProcessingTester(self)``;
# the obfuscation renamed the class, so restore the expected alias.
GLPNImageProcessingTester = UpperCAmelCase
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """GLPN image-processor test-suite.

    NOTE(review): obfuscation damage — the mixin base ``__SCREAMING_SNAKE_CASE``
    is undefined here (presumably ``ImageProcessingSavingTestMixin`` imported
    above — TODO confirm); ``GLPNImageProcessingTester`` is not defined in
    this file (the tester class above was renamed); and assignments were
    collapsed into ``__A`` while later lines read ``image_processing``/
    ``image_inputs``/``encoded_images`` and pass an undefined ``A`` —
    NameErrors at runtime.  Intended names inline.
    """
    snake_case_ = GLPNImageProcessor if is_vision_available() else None  # intended attribute: image_processing_class
    def UpperCamelCase_ ( self : int ):
        # originally ``setUp``
        __A = GLPNImageProcessingTester(self )  # intended: self.image_processor_tester = ...
    @property
    def UpperCamelCase_ ( self : Optional[Any] ):
        # originally the ``image_processor_dict`` property
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase_ ( self : Any ):
        # originally ``test_image_processor_properties``
        __A = self.image_processing_class(**self.image_processor_dict )  # intended: image_processing
        self.assertTrue(hasattr(A ,"do_resize" ) )
        self.assertTrue(hasattr(A ,"size_divisor" ) )
        self.assertTrue(hasattr(A ,"resample" ) )
        self.assertTrue(hasattr(A ,"do_rescale" ) )
    def UpperCamelCase_ ( self : str ):
        # intentionally a no-op in the original test-suite
        pass
    def UpperCamelCase_ ( self : Dict ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )  # intended: image_processing
        # create random PIL images
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )  # intended: image_inputs; equal_resolution=False
        for image in image_inputs:
            self.assertIsInstance(A ,Image.Image )  # intended: assertIsInstance(image, ...)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values  # intended: encoded_images
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def UpperCamelCase_ ( self : Optional[Any] ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )  # intended: image_processing
        # create random numpy tensors
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )  # intended: equal_resolution=False, numpify=True
        for image in image_inputs:
            self.assertIsInstance(A ,np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values  # intended: encoded_images
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def UpperCamelCase_ ( self : int ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )  # intended: image_processing
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )  # intended: equal_resolution=False, torchify=True
        for image in image_inputs:
            self.assertIsInstance(A ,torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values  # intended: encoded_images
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 55 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __snake_case ( __UpperCamelCase : Optional[Any] ):
    """Entropy estimate used by DeeBERT's early-exit criterion.

    Computes, per row of the 2-D logits tensor ``x``::

        entropy(x) = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i))

    Fixes the obfuscated original, which overwrote one temporary (summing
    ``x`` instead of ``exp(x)``) and then read the unbound names
    ``exp_x``/``A``/``B``.
    """
    exp_x = torch.exp(__UpperCamelCase )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(__UpperCamelCase * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class _a ( nn.Module ):
    """DeeBERT encoder: a stack of BertLayers, each paired with a highway
    (early-exit) classifier; at inference a low-entropy highway prediction
    aborts the stack by raising HighwayException.

    NOTE(review): obfuscation damage —
    * assignments were collapsed into a local ``A_`` while later lines read
      the original names (``self.layer``, ``all_hidden_states``,
      ``loaded_model``, ...), so the methods raise NameError as written;
    * the forward method ``__A`` declares five parameters all named
      ``UpperCAmelCase`` — a SyntaxError (duplicate argument names);
      presumably (hidden_states, attention_mask, head_mask,
      encoder_hidden_states, encoder_attention_mask) — TODO confirm.
    """
    def __init__( self : Any , UpperCAmelCase : List[Any] ):
        super().__init__()
        A_ = config.output_attentions  # intended: self.output_attentions; param originally ``config``
        A_ = config.output_hidden_states  # intended: self.output_hidden_states
        A_ = nn.ModuleList([BertLayer(UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )  # intended: self.layer
        A_ = nn.ModuleList([BertHighway(UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )  # intended: self.highway
        A_ = [-1 for _ in range(config.num_hidden_layers )]  # intended: self.early_exit_entropy (-1 disables early exit)
    def __A ( self : str , UpperCAmelCase : Dict ):
        # originally ``set_early_exit_entropy``: scalar is broadcast per layer.
        if (type(UpperCAmelCase ) is float) or (type(UpperCAmelCase ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                A_ = x  # intended: self.early_exit_entropy[i] = x (param originally ``x``)
        else:
            A_ = x  # intended: self.early_exit_entropy = x
    def __A ( self : Dict , UpperCAmelCase : Optional[Any] ):
        # originally ``init_highway_pooler``: copy the main pooler's weights
        # into every highway pooler.
        A_ = pooler.state_dict()  # intended: loaded_model (param originally ``pooler``)
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , ):
        # originally ``forward``; accumulators below are tuples, grown per layer.
        A_ = ()  # intended: all_hidden_states
        A_ = ()  # intended: all_attentions
        A_ = ()  # intended: all_highway_exits
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                A_ = all_hidden_states + (hidden_states,)
            A_ = layer_module(
                UpperCAmelCase , UpperCAmelCase , head_mask[i] , UpperCAmelCase , UpperCAmelCase )  # intended: layer_outputs
            A_ = layer_outputs[0]  # intended: hidden_states
            if self.output_attentions:
                A_ = all_attentions + (layer_outputs[1],)
            A_ = (hidden_states,)  # intended: current_outputs
            if self.output_hidden_states:
                A_ = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                A_ = current_outputs + (all_attentions,)
            A_ = self.highway[i](UpperCAmelCase )  # intended: highway_exit
            # logits, pooled_output
            if not self.training:
                A_ = highway_exit[0]  # intended: highway_logits
                A_ = entropy(UpperCAmelCase )  # intended: highway_entropy
                A_ = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
                A_ = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the stack via an exception that
                    # carries the partial outputs (caught by the classifier).
                    A_ = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)  # intended: new_output
                    raise HighwayException(UpperCAmelCase , i + 1 )
            else:
                A_ = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            A_ = all_hidden_states + (hidden_states,)
        A_ = (hidden_states,)  # intended: outputs
        if self.output_hidden_states:
            A_ = outputs + (all_hidden_states,)
        if self.output_attentions:
            A_ = outputs + (all_attentions,)
        A_ = outputs + (all_highway_exits,)
        return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    'The Bert Model transformer with early exiting (DeeBERT). ' , snake_case_ , )
class _a ( snake_case_ ):
    """DeeBERT backbone: BertEmbeddings + DeeBertEncoder + BertPooler.

    NOTE(review): obfuscation damage —
    * the decorator argument / base class ``snake_case_`` is undefined
      (presumably ``BERT_START_DOCSTRING`` and ``BertPreTrainedModel``, both
      imported above — TODO confirm);
    * assignments were collapsed into ``A_`` while later lines read the
      original names (``self.config``, ``input_shape``, ``device``, ...);
    * the forward method ``__A`` declares eight parameters all named
      ``UpperCAmelCase`` — a SyntaxError (duplicate argument names);
      presumably (input_ids, attention_mask, token_type_ids, position_ids,
      head_mask, inputs_embeds, encoder_hidden_states,
      encoder_attention_mask) from the error messages below.
    """
    def __init__( self : Dict , UpperCAmelCase : Optional[int] ):
        super().__init__(UpperCAmelCase )
        A_ = config  # intended: self.config (param originally ``config``)
        A_ = BertEmbeddings(UpperCAmelCase )  # intended: self.embeddings
        A_ = DeeBertEncoder(UpperCAmelCase )  # intended: self.encoder (that class is also named ``_a`` in this file)
        A_ = BertPooler(UpperCAmelCase )  # intended: self.pooler
        self.init_weights()
    def __A ( self : Any ):
        # originally ``init_highway_pooler``
        self.encoder.init_highway_pooler(self.pooler )
    def __A ( self : Union[str, Any] ):
        # originally ``get_input_embeddings``
        return self.embeddings.word_embeddings
    def __A ( self : List[Any] , UpperCAmelCase : Optional[int] ):
        # originally ``set_input_embeddings``
        A_ = value  # intended: self.embeddings.word_embeddings = value (param originally ``value``)
    def __A ( self : Any , UpperCAmelCase : int ):
        # originally ``_prune_heads``
        for layer, heads in heads_to_prune.items():  # NOTE(review): reads ``heads_to_prune``; the parameter is ``UpperCAmelCase``
            self.encoder.layer[layer].attention.prune_heads(UpperCAmelCase )
    @add_start_docstrings_to_model_forward(UpperCAmelCase )  # NOTE(review): ``UpperCAmelCase`` undefined at class scope; presumably BERT_INPUTS_DOCSTRING
    def __A ( self : Optional[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , ):
        # originally ``forward``
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            A_ = input_ids.size()  # intended: input_shape
        elif inputs_embeds is not None:
            A_ = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        A_ = input_ids.device if input_ids is not None else inputs_embeds.device  # intended: device
        if attention_mask is None:
            A_ = torch.ones(UpperCAmelCase , device=UpperCAmelCase )  # intended: attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            A_ = torch.ones(UpperCAmelCase , device=UpperCAmelCase )
        if token_type_ids is None:
            A_ = torch.zeros(UpperCAmelCase , dtype=torch.long , device=UpperCAmelCase )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        A_ = self.get_extended_attention_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )  # intended: extended_attention_mask
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            A_ = encoder_attention_mask[:, None, :, :]  # intended: encoder_extended_attention_mask
        if encoder_attention_mask.dim() == 2:
            A_ = encoder_attention_mask[:, None, None, :]
        A_ = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        A_ = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        A_ = self.get_head_mask(UpperCAmelCase , self.config.num_hidden_layers )  # intended: head_mask
        A_ = self.embeddings(
            input_ids=UpperCAmelCase , position_ids=UpperCAmelCase , token_type_ids=UpperCAmelCase , inputs_embeds=UpperCAmelCase )  # intended: embedding_output
        A_ = self.encoder(
            UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )  # intended: encoder_outputs
        A_ = encoder_outputs[0]  # intended: sequence_output
        A_ = self.pooler(UpperCAmelCase )  # intended: pooled_output
        A_ = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ] # add hidden_states and attentions if they are here
        return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _a ( Exception ):
    """Control-flow signal raised inside DeeBertEncoder when a highway exit
    is confident enough to stop the layer stack early.

    Fixes the obfuscated original: the base class ``snake_case_`` was
    undefined (the encoder ``raise``s and the classifier ``except``s this
    type, so it must derive from ``Exception``), ``__init__`` declared both
    parameters as ``UpperCAmelCase`` (a SyntaxError: duplicate argument
    names), and the attribute assignments were collapsed into a local ``A_``.
    """

    def __init__( self , message , exit_layer ):
        # ``message`` carries the partial outputs tuple out of the encoder.
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class _a ( nn.Module ):
    """A single DeeBERT highway exit: BertPooler + dropout + linear classifier.

    NOTE(review): assignments were collapsed into a local ``A_`` so the later
    reads of ``self.pooler``/``pooler_input``/``bmodel_output`` fail with
    NameError/AttributeError at runtime; intended bindings are noted inline.
    """
    def __init__( self : Tuple , UpperCAmelCase : Any ):
        super().__init__()
        A_ = BertPooler(UpperCAmelCase )  # intended: self.pooler (param originally ``config``)
        A_ = nn.Dropout(config.hidden_dropout_prob )  # intended: self.dropout
        A_ = nn.Linear(config.hidden_size , config.num_labels )  # intended: self.classifier
    def __A ( self : str , UpperCAmelCase : str ):
        # originally ``forward``; param originally ``encoder_outputs``
        # Pooler
        A_ = encoder_outputs[0]  # intended: pooler_input
        A_ = self.pooler(UpperCAmelCase )  # intended: pooler_output
        # "return" pooler_output
        # BertModel
        A_ = (pooler_input, pooler_output) + encoder_outputs[1:]  # intended: bmodel_output
        # "return" bmodel_output
        # Dropout and classification
        A_ = bmodel_output[1]  # intended: pooled_output
        A_ = self.dropout(UpperCAmelCase )
        A_ = self.classifier(UpperCAmelCase )  # intended: logits
        return logits, pooled_output
@add_start_docstrings(
    'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , snake_case_ , )
class _a ( snake_case_ ):
    """DeeBERT sequence-classification head with per-layer highway exits.

    NOTE(review): same obfuscation damage as the classes above —
    ``snake_case_`` is undefined (presumably BERT_START_DOCSTRING /
    BertPreTrainedModel — TODO confirm); assignments collapsed into ``A_``;
    and the forward method ``__A`` declares nine parameters all named
    ``UpperCAmelCase`` (a SyntaxError: duplicate argument names), presumably
    (input_ids, attention_mask, token_type_ids, position_ids, head_mask,
    inputs_embeds, labels, output_layer=-1, train_highway=False).
    """
    def __init__( self : int , UpperCAmelCase : Optional[Any] ):
        super().__init__(UpperCAmelCase )
        A_ = config.num_labels  # intended: self.num_labels (param originally ``config``)
        A_ = config.num_hidden_layers  # intended: self.num_layers
        A_ = DeeBertModel(UpperCAmelCase )  # intended: self.bert (that class is also named ``_a`` in this file)
        A_ = nn.Dropout(config.hidden_dropout_prob )  # intended: self.dropout
        A_ = nn.Linear(config.hidden_size , self.config.num_labels )  # intended: self.classifier
        self.init_weights()
    @add_start_docstrings_to_model_forward(UpperCAmelCase )  # NOTE(review): undefined at class scope; presumably BERT_INPUTS_DOCSTRING
    def __A ( self : str , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[str]=-1 , UpperCAmelCase : List[Any]=False , ):
        # originally ``forward``
        A_ = self.num_layers  # intended: exit_layer (default: no early exit)
        try:
            A_ = self.bert(
                UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , position_ids=UpperCAmelCase , head_mask=UpperCAmelCase , inputs_embeds=UpperCAmelCase , )  # intended: outputs
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            A_ = outputs[1]  # intended: pooled_output
            A_ = self.dropout(UpperCAmelCase )
            A_ = self.classifier(UpperCAmelCase )  # intended: logits
            A_ = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            # an early exit fired inside the encoder; recover its partial outputs
            A_ = e.message  # intended: outputs
            A_ = e.exit_layer  # intended: exit_layer
            A_ = outputs[0]  # intended: logits
        if not self.training:
            A_ = entropy(UpperCAmelCase )  # intended: original_entropy = entropy(logits)
            A_ = []  # intended: highway_entropy
            A_ = []  # intended: highway_logits_all
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                A_ = MSELoss()  # intended: loss_fct
                A_ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )  # intended: loss
            else:
                A_ = CrossEntropyLoss()
                A_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            A_ = []  # intended: highway_losses
            for highway_exit in outputs[-1]:
                A_ = highway_exit[0]  # intended: highway_logits
                if not self.training:
                    highway_logits_all.append(UpperCAmelCase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    A_ = MSELoss()
                    A_ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )  # intended: highway_loss
                else:
                    A_ = CrossEntropyLoss()
                    A_ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(UpperCAmelCase )
            if train_highway:
                A_ = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                A_ = (loss,) + outputs
        if not self.training:
            A_ = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                A_ = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 86 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Dataset task template mapping an image column to a labels column.

    NOTE(review): ``__SCREAMING_SNAKE_CASE`` is undefined here — presumably
    ``frozen=True`` and the base class ``TaskTemplate`` (imported above).
    All five class attributes were obfuscated to ``snake_case_`` (presumably
    ``task``, ``input_schema``, ``label_schema``, ``image_column``,
    ``label_column``), so as written the later assignments shadow the
    earlier ones.
    """
    snake_case_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    snake_case_ = Features({"image": Image()} )
    snake_case_ = Features({"labels": ClassLabel} )
    snake_case_ = "image"
    snake_case_ = "labels"
    def UpperCamelCase_ ( self : Optional[Any] ,A : Tuple ):
        """Return a copy whose label schema is aligned with the dataset's ClassLabel feature.

        NOTE(review): the body reads ``features``/``label_schema``/
        ``task_template`` while the parameter is ``A`` and assignments were
        collapsed into ``__A`` — NameError as written.
        """
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] ,A ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        __A = copy.deepcopy(self )  # intended: task_template
        __A = self.label_schema.copy()  # intended: label_schema
        __A = features[self.label_column]
        __A = label_schema
        return task_template
    @property
    def UpperCamelCase_ ( self : Any ):
        # Maps the dataset's column names to the task's standard names.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 55 | 0 |
def SCREAMING_SNAKE_CASE ( lowercase_=28_123 ) -> int:
    """Project Euler 23: sum of all positive integers <= ``lowercase_`` that
    cannot be written as the sum of two abundant numbers.

    Fixes the obfuscated original, which read the unbound names ``limit``/
    ``sum_divs``/``abundants``/``res``, added the *limit* to the abundant
    set instead of the current number, and annotated the int result as
    ``Union[str, Any]`` without importing ``Union`` (a NameError at
    definition time).
    """
    # sum_divs[n] accumulates the sum of proper divisors of n; every entry
    # starts at 1 because 1 divides everything (harmless for n <= 1).
    sum_divs = [1] * (lowercase_ + 1)
    for i in range(2 , int(lowercase_**0.5 ) + 1 ):
        sum_divs[i * i] += i  # i pairs with itself exactly once
        for k in range(i + 1 , lowercase_ // i + 1 ):
            sum_divs[k * i] += k + i  # divisor pair (i, k), counted once
    abundants = set()
    res = 0
    for n in range(1 , lowercase_ + 1 ):
        if sum_divs[n] > n:
            # Added before the test below so that n = a + a is detected.
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
    # The published snippet called ``solution()``, but the function in this
    # file is named ``SCREAMING_SNAKE_CASE`` — calling it directly avoids the
    # NameError.
    print(SCREAMING_SNAKE_CASE())
| 87 |
from math import sqrt
def UpperCAmelCase ( a_ ) -> bool:
    """Return True iff ``a_`` is a prime number (trial division up to sqrt).

    Fixes the obfuscated original, whose body read the unbound names
    ``number``/``status`` and asserted ``isinstance(a_, a_)``.
    """
    assert isinstance(a_ , int ) and (
        a_ >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if a_ <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(a_ ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if a_ % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def UpperCAmelCase ( a_ ) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to ``a_`` inclusive.

    Fixes the obfuscated original, whose body read the unbound names ``n``/
    ``begin_list``/``ans`` (and whose ``-> Any`` annotation used an
    unimported name).
    """
    assert isinstance(a_ , int ) and (a_ > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , a_ + 1 ) )
    ans = [] # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def UpperCAmelCase ( a_ ) -> list:
    """Originally ``get_prime_numbers``: all primes in [2, N].

    NOTE(review): obfuscation damage — the body reads ``n``/``ans`` (never
    bound; the parameter is ``a_``) and calls ``is_prime``, which is not
    defined in this file (the primality helper above is also named
    ``UpperCAmelCase`` and is shadowed).  NameError as written.
    """
    assert isinstance(a_ , a_ ) and (n > 2), "'N' must been an int and > 2"
    __A = []  # intended: ans
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(a_ ):
            ans.append(a_ )  # intended: ans.append(number)
    # precondition
    assert isinstance(a_ , a_ ), "'ans' must been from type list"
    return ans
def UpperCAmelCase ( a_ ) -> list:
    """Originally ``prime_factorization``: the prime factors of ``a_``.

    NOTE(review): obfuscation damage — reads ``number``/``quotient``/
    ``factor``/``ans`` whose assignments were collapsed into ``__A``, and
    calls ``is_prime`` which is not defined in this file.  NameError as
    written.  Intended bindings are noted inline.
    """
    assert isinstance(a_ , a_ ) and number >= 0, "'number' must been an int and >= 0"
    __A = [] # this list will be returns of the function.  (intended: ans)
    # potential prime number factors.
    __A = 2  # intended: factor
    __A = number  # intended: quotient = a_
    if number == 0 or number == 1:
        ans.append(a_ )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(a_ ):
        while quotient != 1:
            if is_prime(a_ ) and (quotient % factor == 0):  # intended: is_prime(factor)
                ans.append(a_ )  # intended: ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(a_ )
    # precondition
    assert isinstance(a_ , a_ ), "'ans' must been from type list"
    return ans
def UpperCAmelCase ( a_ ) -> int:
    """Originally ``greatest_prime_factor`` of ``a_``.

    NOTE(review): reads ``number``/``ans`` (never bound) and calls
    ``prime_factorization``, which is not defined in this file (the helper
    above is also named ``UpperCAmelCase``).  NameError as written.
    """
    assert isinstance(a_ , a_ ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    __A = 0  # intended: ans
    # prime factorization of 'number'
    __A = prime_factorization(a_ )  # intended: prime_factors
    __A = max(a_ )  # intended: ans = max(prime_factors)
    # precondition
    assert isinstance(a_ , a_ ), "'ans' must been from type int"
    return ans
def UpperCAmelCase ( a_ ) -> int:
    """Originally ``smallest_prime_factor`` of ``a_``.

    NOTE(review): same damage as the greatest-factor helper above — reads
    ``number``/``ans`` (never bound) and calls the undefined
    ``prime_factorization``.  NameError as written.
    """
    assert isinstance(a_ , a_ ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    __A = 0  # intended: ans
    # prime factorization of 'number'
    __A = prime_factorization(a_ )  # intended: prime_factors
    __A = min(a_ )  # intended: ans = min(prime_factors)
    # precondition
    assert isinstance(a_ , a_ ), "'ans' must been from type int"
    return ans
def UpperCAmelCase ( a_ ) -> bool:
    """Return True iff ``a_`` is even.

    Fixes the obfuscated original, which read the unbound name ``number``
    and asserted ``isinstance(..., a_)`` instead of ``isinstance(..., bool)``.
    """
    assert isinstance(a_ , int ), "'number' must been an int"
    assert isinstance(a_ % 2 == 0 , bool ), "compare bust been from type bool"
    return a_ % 2 == 0
def UpperCAmelCase ( a_ ) -> bool:
    """Return True when ``a_`` is odd.

    Fix: the mangled original read an undefined name ``number``.
    """
    assert isinstance(a_, int), "'number' must been an int"
    assert isinstance(a_ % 2 != 0, bool), "compare bust been from type bool"
    return a_ % 2 != 0
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(a_ , a_ ) and (number > 2) and is_even(a_ )
), "'number' must been an int, even and > 2"
__A = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__A = get_prime_numbers(a_ )
__A = len(a_ )
# run variable for while-loops.
__A = 0
__A = None
# exit variable. for break up the loops
__A = True
while i < len_pn and loop:
__A = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__A = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(a_ , a_ )
and (len(a_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def UpperCAmelCase ( a_ , b_ ) -> int:
    """Return gcd(a_, b_) via the iterative Euclidean algorithm.

    Fix: the mangled original declared two parameters both named ``a_``
    (a SyntaxError) and collapsed the loop variables onto one name.
    """
    assert (
        isinstance(a_, int)
        and isinstance(b_, int)
        and (a_ >= 0)
        and (b_ >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while b_ != 0:
        rest = a_ % b_
        a_ = b_
        b_ = rest
    # postcondition
    assert isinstance(a_, int) and (
        a_ >= 0
    ), "'number' must been from type int and positive"
    return a_
def UpperCAmelCase ( a_ , b_ ) -> int:
    """Return the least common multiple (kgV) of ``a_`` and ``b_`` (both >= 1)
    by merging their prime factorizations.

    Relies on the module-level ``prime_factorization`` helper.
    Fix: the mangled original declared duplicate parameter names (SyntaxError)
    and read undefined locals.
    """
    assert (
        isinstance(a_, int)
        and isinstance(b_, int)
        and (a_ >= 1)
        and (b_ >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # the answer that will be returned
    # for kgV(x, 1)
    if a_ > 1 and b_ > 1:
        # build the prime factorization of both inputs
        prime_fac_a = prime_factorization(a_)
        prime_fac_b = prime_factorization(b_)
    elif a_ == 1 or b_ == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(a_, b_)
    done = []  # primes already merged from both factorizations
    # iterate through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                # shared prime: take the larger multiplicity
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterate through primeFac2 (primes only present in b_)
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # postcondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def UpperCAmelCase ( a_ ) -> int:
    """Return the prime at zero-based index ``a_`` (index 0 -> 2).

    Relies on the module-level ``is_prime`` helper.
    Fix: the mangled original read undefined names ``n``, ``index``, ``ans``.
    """
    assert isinstance(a_, int) and (a_ >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < a_:
        index += 1
        ans += 1  # step to the next number
        # advance until the next prime number
        while not is_prime(ans):
            ans += 1
    # postcondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def UpperCAmelCase ( a_ , b_ ) -> list:
    """Return all primes strictly between the primes ``a_`` and ``b_``.

    Relies on the module-level ``is_prime`` helper.
    Fix: the mangled original declared duplicate parameter names (SyntaxError)
    and read undefined locals (``number``, ``ans``).
    """
    assert (
        is_prime(a_) and is_prime(b_) and (a_ < b_)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = a_ + 1  # jump to the next number
    ans = []  # the list returned by this function
    # advance to the next prime number
    while not is_prime(number):
        number += 1
    while number < b_:
        ans.append(number)
        number += 1
        # advance to the next prime number
        while not is_prime(number):
            number += 1
    # postcondition: 'ans' contains neither 'a_' nor 'b_'
    assert (
        isinstance(ans, list)
        and ans[0] != a_
        and ans[len(ans) - 1] != b_
    ), "'ans' must been a list without the arguments"
    return ans
def UpperCAmelCase ( a_ ) -> list:
    """Return all positive divisors of ``a_`` in ascending order
    (1 and ``a_`` included).

    Fix: the mangled original read undefined names ``n`` and ``ans``.
    """
    assert isinstance(a_, int) and (a_ >= 1), "'n' must been int and >= 1"
    ans = [divisor for divisor in range(1, a_ + 1) if a_ % divisor == 0]
    # postcondition
    assert ans[0] == 1 and ans[len(ans) - 1] == a_, "Error in function getDivisiors(...)"
    return ans
def UpperCAmelCase ( a_ ) -> bool:
    """Return True when ``a_ > 1`` is a perfect number (equals the sum of its
    proper divisors, e.g. 6 or 28).

    Relies on the module-level ``get_divisors`` helper.
    Fix: the mangled original read undefined names ``number``/``divisors``.
    """
    assert isinstance(a_, int) and (
        a_ > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(a_)
    # sanity-check the helper's contract
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == a_)
    ), "Error in help-function getDivisiors(...)"
    # sum all proper divisors (excludes 'a_' itself, hence [:-1])
    return sum(divisors[:-1]) == a_
def UpperCAmelCase ( a_ , b_ ) -> tuple:
    """Return the fraction ``a_ / b_`` reduced to lowest terms as a
    ``(numerator, denominator)`` tuple.

    Relies on the module-level ``gcd`` helper.
    Fix: the mangled original declared duplicate parameter names (SyntaxError)
    and read undefined names ``numerator``/``denominator``.
    """
    assert (
        isinstance(a_, int)
        and isinstance(b_, int)
        and (b_ != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # greatest common divisor of numerator and denominator
    gcd_of_fraction = gcd(abs(a_), abs(b_))
    # sanity-check the helper's contract
    assert (
        isinstance(gcd_of_fraction, int)
        and (a_ % gcd_of_fraction == 0)
        and (b_ % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (a_ // gcd_of_fraction, b_ // gcd_of_fraction)
def UpperCAmelCase ( a_ ) -> int:
    """Return ``a_!`` computed iteratively; ``a_`` must be a non-negative int.

    Fix: the mangled original read an undefined name ``n``.
    """
    assert isinstance(a_, int) and (a_ >= 0), "'n' must been a int and >= 0"
    ans = 1  # the returned product
    for factor in range(1, a_ + 1):
        ans *= factor
    return ans
def UpperCAmelCase ( a_ ) -> int:
    """Return the ``a_``-th element of the sequence 1, 1, 1, 2, 3, 5, 8, ...
    (this module's indexing: fib(0) == fib(1) == fib(2) == 1).

    Fix: the mangled original read undefined names ``fiba``/``tmp``/``ans``.
    """
    assert isinstance(a_, int) and (a_ >= 0), "'n' must been an int and >= 0"
    prev = 0
    ans = 1  # the returned Fibonacci value
    for _ in range(a_ - 1):
        # simultaneous update replicates: tmp = ans; ans += prev; prev = tmp
        prev, ans = ans, ans + prev
    return ans
| 55 | 0 |
"""simple docstring"""
import math
def _snake_case ( __snake_case : int ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : int = 0
while num > 0:
_lowerCamelCase : Any = num % 8
_lowerCamelCase : int = octal + (remainder * math.floor(math.pow(10 , __snake_case ) ))
counter += 1
_lowerCamelCase : str = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F'0o{int(__snake_case )}'
def _snake_case ( ):
    """Demo driver: print the octal form of a few sample integers.

    NOTE(review): mangled module — this ``def`` shadows the converter above
    (both are named ``_snake_case``) and the body calls ``decimal_to_octal``,
    which is never defined here; running it raises NameError. The ``main()``
    call in the guard below is likewise unresolved. Restore distinct
    function names upstream before fixing the call sites.
    """
    print("""\n2 in octal is:""" )
    print(decimal_to_octal(2 ) ) # = 2
    print("""\n8 in octal is:""" )
    print(decimal_to_octal(8 ) ) # = 10
    print("""\n65 in octal is:""" )
    print(decimal_to_octal(65 ) ) # = 101
    print("""\n216 in octal is:""" )
    print(decimal_to_octal(216 ) ) # = 330
    print("""\n512 in octal is:""" )
    print(decimal_to_octal(512 ) ) # = 1000
    print("""\n""" )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this module (mangled name).
    main()
| 88 |
import os
def UpperCAmelCase ( ) -> int:
    """Return the maximal top-to-bottom path sum through the triangle of
    space-separated numbers stored in ``triangle.txt`` next to this script
    (Project Euler 18/67).

    Fixes vs. the mangled original: ``__file__`` was replaced by an undefined
    name ``a_``; the file handle was never closed; locals were undefined.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    # `with` guarantees the handle is closed (the original leaked it).
    with open(triangle_path) as f:
        triangle = [[int(tok) for tok in line.strip().split(" ")] for line in f]
    # Top-down DP: each cell accumulates the best path sum from the apex.
    for i in range(1, len(triangle)):
        for j in range(len(triangle[i])):
            # cell directly above (absent for the last cell of each row)
            above = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
            # cell above-left (absent for the first cell of each row)
            above_left = triangle[i - 1][j - 1] if j > 0 else 0
            triangle[i][j] += max(above, above_left)
    return max(triangle[-1])
if __name__ == "__main__":
    # Fix: the original printed `solution()`, a name that does not exist here.
    print(UpperCAmelCase())
| 55 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _lowerCamelCase( unittest.TestCase ):
    '''Tests for CLIPSegProcessor: slow/fast save-load round-trips and joint
    text / image / visual-prompt preprocessing.

    NOTE(review): mangled source — every method below is named
    ``UpperCamelCase``, so each later ``def`` shadows the previous one and
    only the last binding survives on the class; none carries a ``test_``
    prefix, so unittest discovers nothing. Locals were also collapsed onto
    ``_lowercase``, leaving attribute reads such as ``self.tmpdirname`` /
    ``self.vocab_file`` unset. Restore distinct names upstream.'''
    def UpperCamelCase ( self) -> str:
        '''Create a tmp dir with a tiny CLIP vocab/merges pair and a ViT
        image-processor config (plays the role of setUp).'''
        _lowercase : str = tempfile.mkdtemp()
        # fmt: off
        _lowercase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        _lowercase : List[Any] = dict(zip(lowerCamelCase, range(len(lowerCamelCase))))
        _lowercase : Tuple = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        _lowercase : Any = {'unk_token': '<unk>'}
        _lowercase : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        _lowercase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(lowerCamelCase) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(lowerCamelCase))
        _lowercase : Optional[Any] = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        _lowercase : Optional[int] = os.path.join(self.tmpdirname, lowerCamelCase)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(lowerCamelCase, lowerCamelCase)
    def UpperCamelCase ( self, **lowerCamelCase) -> int:
        '''Build a slow CLIPTokenizer from the tmp-dir fixtures.'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase)
    def UpperCamelCase ( self, **lowerCamelCase) -> Optional[Any]:
        '''Build a fast CLIPTokenizer from the tmp-dir fixtures.'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase)
    def UpperCamelCase ( self, **lowerCamelCase) -> Any:
        '''Build a ViTImageProcessor from the tmp-dir fixtures.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase)
    def UpperCamelCase ( self) -> Optional[Any]:
        '''Remove the tmp dir (plays the role of tearDown).'''
        shutil.rmtree(self.tmpdirname)
    def UpperCamelCase ( self) -> Any:
        '''Return one random 3x30x400 uint8 image wrapped in a PIL Image.'''
        _lowercase : Any = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uinta)]
        _lowercase : Union[str, Any] = [Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1)) for x in image_inputs]
        return image_inputs
    def UpperCamelCase ( self) -> Optional[int]:
        '''Save slow and fast processors and reload them with defaults.'''
        _lowercase : Optional[Any] = self.get_tokenizer()
        _lowercase : Dict = self.get_rust_tokenizer()
        _lowercase : Dict = self.get_image_processor()
        _lowercase : Tuple = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
        processor_slow.save_pretrained(self.tmpdirname)
        _lowercase : Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase)
        _lowercase : List[Any] = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
        processor_fast.save_pretrained(self.tmpdirname)
        _lowercase : str = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase)
        self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, lowerCamelCase)
        self.assertIsInstance(processor_fast.image_processor, lowerCamelCase)
    def UpperCamelCase ( self) -> Optional[int]:
        '''Reload a saved processor with extra kwargs overriding components.'''
        _lowercase : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        _lowercase : str = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        _lowercase : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0)
        _lowercase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCamelCase, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, lowerCamelCase)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, lowerCamelCase)
    def UpperCamelCase ( self) -> Union[str, Any]:
        '''Processor image output must match the bare image processor.'''
        _lowercase : Dict = self.get_image_processor()
        _lowercase : int = self.get_tokenizer()
        _lowercase : Union[str, Any] = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
        _lowercase : Optional[int] = self.prepare_image_inputs()
        _lowercase : str = image_processor(lowerCamelCase, return_tensors='np')
        _lowercase : Optional[int] = processor(images=lowerCamelCase, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def UpperCamelCase ( self) -> Optional[int]:
        '''Processor text output must match the bare tokenizer.'''
        _lowercase : Tuple = self.get_image_processor()
        _lowercase : Optional[int] = self.get_tokenizer()
        _lowercase : int = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
        _lowercase : str = 'lower newer'
        _lowercase : Dict = processor(text=lowerCamelCase)
        _lowercase : Union[str, Any] = tokenizer(lowerCamelCase)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def UpperCamelCase ( self) -> Tuple:
        '''Joint text+image call yields ids, mask and pixel values.'''
        _lowercase : Dict = self.get_image_processor()
        _lowercase : Tuple = self.get_tokenizer()
        _lowercase : int = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
        _lowercase : List[Any] = 'lower newer'
        _lowercase : Any = self.prepare_image_inputs()
        _lowercase : Any = processor(text=lowerCamelCase, images=lowerCamelCase)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(lowerCamelCase):
            processor()
    def UpperCamelCase ( self) -> Any:
        '''Image + visual-prompt call yields both pixel-value tensors.'''
        _lowercase : Optional[int] = self.get_image_processor()
        _lowercase : Any = self.get_tokenizer()
        _lowercase : Tuple = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
        _lowercase : str = self.prepare_image_inputs()
        _lowercase : str = self.prepare_image_inputs()
        _lowercase : Union[str, Any] = processor(images=lowerCamelCase, visual_prompt=lowerCamelCase)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(lowerCamelCase):
            processor()
    def UpperCamelCase ( self) -> Dict:
        '''batch_decode must be forwarded verbatim to the tokenizer.'''
        _lowercase : List[str] = self.get_image_processor()
        _lowercase : Dict = self.get_tokenizer()
        _lowercase : List[Any] = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
        _lowercase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _lowercase : Optional[int] = processor.batch_decode(lowerCamelCase)
        _lowercase : List[str] = tokenizer.batch_decode(lowerCamelCase)
        self.assertListEqual(lowerCamelCase, lowerCamelCase)
| 89 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
# `_unmatched` marks a parameter leaf no partition rule has claimed yet; the
# partition builder below asserts none remain. Fix: the mangled original bound
# both sentinels to the same throwaway name, leaving `_unmatched` undefined.
_unmatched = object()
SCREAMING_SNAKE_CASE: Union[str, Any] = _unmatched  # keep mangled alias for any stray references
# For specifying empty leaf dict `{}`
empty_dict = object()
SCREAMING_SNAKE_CASE: List[str] = empty_dict
def UpperCAmelCase ( a_ , b_ ) -> bool:
    """Return True if the sequence of regex queries ``a_`` matches a
    contiguous window of the key tuple ``b_`` (each query is end-anchored
    with ``$`` so it must cover the whole key segment).

    Fixes vs. the mangled original: two parameters were both named ``a_``
    (a SyntaxError) and each pattern was matched against the query list
    itself instead of the key segment.
    """
    compiled = tuple(re.compile(x + "$") for x in a_)
    # slide the query window over every starting offset of the key tuple
    for i in range(len(b_) - len(a_) + 1):
        matches = [pattern.match(segment) for pattern, segment in zip(compiled, b_[i:])]
        if matches and all(matches):
            return True
    return False
def UpperCAmelCase ( a_ ):
    """Return a ``replace(key, val)`` closure that maps a parameter key to the
    replacement of the first rule in ``a_`` matching it, or ``val`` unchanged.

    NOTE(review): ``_match`` is expected at module level; in this mangled file
    the matcher was renamed — confirm the binding before use.
    Fix: the original closure declared two parameters both named ``a_``
    (a SyntaxError) and read undefined names ``rules``/``val``.
    """
    def replace(key, val):
        for rule, replacement in a_:
            if _match(rule, key):
                return replacement
        return val
    return replace
def UpperCAmelCase ( ) -> list:
    """Return the static (regex-path, PartitionSpec) rules for 'mp' model
    parallelism of a GPT-style transformer; ``None`` in a spec axis means
    'replicate along that axis'.

    Fix: the mangled original referenced an undefined name ``a_`` in every
    place where ``None`` belongs.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def UpperCAmelCase ( a_ ):
    """Map every leaf of the nested parameter dict ``a_`` to a PartitionSpec
    according to the module's partition rules, asserting full coverage, and
    return the result as a frozen nested dict.

    NOTE(review): ``_get_partition_rules``, ``_replacement_rules`` and
    ``_unmatched`` are expected at module level; in this mangled file their
    definitions were renamed — confirm the bindings.
    Fix: the original read undefined names (``rules``/``replace``/``initd``).
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # start with every flattened key marked as unmatched ...
    initd = {k: _unmatched for k in flatten_dict(a_)}
    # ... then let the first matching rule claim each key
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 55 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Probe the optional serving stack; fall back to inert placeholders when absent.
# NOTE(review): mangled — upstream binds three distinct names here
# (dependency flag, BaseModel fallback, run stub); this file collapses them
# onto `__UpperCAmelCase`, so only the last assignment survives.
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run
    __UpperCAmelCase = True
except (ImportError, AttributeError):
    __UpperCAmelCase = object
    def _snake_case ( *A , **_kwargs ) -> str:
        # Stub standing in for `uvicorn.run`; accepts anything, does nothing.
        # Fix: the original declared `*A, **A` (duplicate parameter -> SyntaxError).
        pass
    __UpperCAmelCase = False
__UpperCAmelCase = logging.get_logger('''transformers-cli/serving''')
def _snake_case ( A ) -> "ServeCommand":
    """argparse factory for the 'serve' subcommand: build the pipeline
    described by the namespace ``A`` and wrap it in a ServeCommand.

    Fixes vs. the mangled original: the body read an undefined name ``args``
    and passed the raw namespace instead of the built pipeline to
    ServeCommand.
    """
    nlp = pipeline(
        task=A.task , model=A.model if A.model else None , config=A.config , tokenizer=A.tokenizer , device=A.device , )
    return ServeCommand(nlp , A.host , A.port , A.workers )
class a__ ( a__ ):
    '''Response model for GET `/`: the served model's configuration, as a dict.

    NOTE(review): mangled — the base class `a__` is undefined at this point;
    upstream this derives from pydantic's BaseModel.'''
    lowercase__ : dict
class a__ ( a__ ):
    '''Response model for POST `/tokenize`: the token strings and, optionally,
    their integer ids.

    NOTE(review): mangled — `a__` here names the class itself as its base;
    upstream this derives from pydantic's BaseModel.'''
    lowercase__ : List[str]
    lowercase__ : Optional[List[int]]
class a__ ( a__ ):
    '''Response model for POST `/detokenize`: the reconstructed text.

    NOTE(review): mangled — `a__` here names the class itself as its base;
    upstream this derives from pydantic's BaseModel.'''
    lowercase__ : str
class a__ ( a__ ):
    '''Response model for POST `/forward`: the raw pipeline output.

    NOTE(review): mangled — `a__` here names the class itself as its base;
    upstream this derives from pydantic's BaseModel.'''
    lowercase__ : Any
class a__ ( a__ ):
    '''transformers-cli `serve` command: exposes a Pipeline over HTTP via
    FastAPI/uvicorn with `/`, `/tokenize`, `/detokenize` and `/forward`
    routes.

    NOTE(review): mangled source — `__init__` declares four parameters all
    named ``lowerCamelCase_`` (a SyntaxError), ``register_subcommand`` reads
    an undefined name ``parser`` and binds its result to a throwaway name,
    and all assignments in ``__init__`` collapse onto ``lowerCAmelCase__``.
    Restore distinct names upstream.'''
    @staticmethod
    def __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ) -> List[str]:
        # Registers the 'serve' subparser and its CLI options.
        lowerCAmelCase__ = parser.add_parser(
            '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''' , type=lowerCamelCase_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
        serve_parser.add_argument('''--host''' , type=lowerCamelCase_ , default='''localhost''' , help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''' , type=lowerCamelCase_ , default=88_88 , help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''' , type=lowerCamelCase_ , default=1 , help='''Number of http workers''' )
        serve_parser.add_argument('''--model''' , type=lowerCamelCase_ , help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''' , type=lowerCamelCase_ , help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''' , type=lowerCamelCase_ , help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''' , type=lowerCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        serve_parser.set_defaults(func=lowerCamelCase_ )
    def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
        # Stores the pipeline/host/port/workers and builds the FastAPI app
        # with the four API routes (fails fast when FastAPI is missing).
        lowerCAmelCase__ = pipeline
        lowerCAmelCase__ = host
        lowerCAmelCase__ = port
        lowerCAmelCase__ = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install "transformers[serving]".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(F"""Serving model over {host}:{port}""" )
            lowerCAmelCase__ = FastAPI(
                routes=[
                    APIRoute(
                        '''/''' , self.model_info , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''GET'''] , ),
                    APIRoute(
                        '''/tokenize''' , self.tokenize , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''POST'''] , ),
                    APIRoute(
                        '''/detokenize''' , self.detokenize , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''POST'''] , ),
                    APIRoute(
                        '''/forward''' , self.forward , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''POST'''] , ),
                ] , timeout=6_00 , )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        # Launches the uvicorn server for the configured app.
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        # GET `/`: returns the model config as a ServeModelInfoResult.
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) ) -> List[Any]:
        # POST `/tokenize`: tokenize text, optionally also returning ids.
        try:
            lowerCAmelCase__ = self._pipeline.tokenizer.tokenize(lowerCamelCase_ )
            if return_ids:
                lowerCAmelCase__ = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
                return ServeTokenizeResult(tokens=lowerCamelCase_ , tokens_ids=lowerCamelCase_ )
            else:
                return ServeTokenizeResult(tokens=lowerCamelCase_ )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(lowerCamelCase_ )} )
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , ) -> Optional[Any]:
        # POST `/detokenize`: decode a list of token ids back to text.
        try:
            lowerCAmelCase__ = self._pipeline.tokenizer.decode(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
            return ServeDeTokenizeResult(model='''''' , text=lowerCamelCase_ )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(lowerCamelCase_ )} )
    async def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=Body(lowerCamelCase_ , embed=lowerCamelCase_ ) ) -> List[str]:
        # POST `/forward`: run the pipeline on the given inputs.
        # Check we don't have empty string
        if len(lowerCamelCase_ ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            lowerCAmelCase__ = self._pipeline(lowerCamelCase_ )
            return ServeForwardResult(output=lowerCamelCase_ )
        except Exception as e:
            raise HTTPException(5_00 , {'''error''': str(lowerCamelCase_ )} )
| 90 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase :
    '''Helper that builds small DeiT configs/inputs and runs shape checks for
    the TF model classes.

    NOTE(review): mangled source — ``__init__`` and
    ``create_and_check_model``-style methods declare many parameters all
    named ``A`` (a SyntaxError), every assignment collapses onto ``__A``,
    and all check methods share the name ``UpperCamelCase_``, so later defs
    shadow earlier ones. Restore distinct names upstream.'''
    def __init__( self : List[Any] ,A : Union[str, Any] ,A : List[Any]=13 ,A : Optional[Any]=30 ,A : Union[str, Any]=2 ,A : Union[str, Any]=3 ,A : Any=True ,A : Dict=True ,A : str=32 ,A : Tuple=2 ,A : Optional[int]=4 ,A : Tuple=37 ,A : List[Any]="gelu" ,A : Dict=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=10 ,A : Optional[Any]=0.02 ,A : Dict=3 ,A : Dict=None ,A : List[Any]=2 ,):
        __A = parent
        __A = batch_size
        __A = image_size
        __A = patch_size
        __A = num_channels
        __A = is_training
        __A = use_labels
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = type_sequence_label_size
        __A = initializer_range
        __A = scope
        __A = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        __A = (image_size // patch_size) ** 2
        __A = num_patches + 2
    def UpperCamelCase_ ( self : List[Any] ):
        # Builds pixel values (and labels when use_labels) plus a config.
        __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        __A = self.get_config()
        return config, pixel_values, labels
    def UpperCamelCase_ ( self : Optional[int] ):
        # Returns a DeiTConfig mirroring the tester's hyper-parameters.
        return DeiTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
    def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,A : Optional[int] ,A : Union[str, Any] ):
        # Checks TFDeiTModel's last_hidden_state shape.
        __A = TFDeiTModel(config=A )
        __A = model(A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : Dict ):
        # Checks masked-image-modeling reconstruction shapes (RGB + greyscale).
        __A = TFDeiTForMaskedImageModeling(config=A )
        __A = model(A )
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __A = 1
        __A = TFDeiTForMaskedImageModeling(A )
        __A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __A = model(A )
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
    def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Dict ,A : Union[str, Any] ):
        # Checks image-classification logits shapes (RGB + greyscale).
        __A = self.type_sequence_label_size
        __A = TFDeiTForImageClassification(A )
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __A = 1
        __A = TFDeiTForImageClassification(A )
        __A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def UpperCamelCase_ ( self : str ):
        # Packs config and inputs into the dict shape the common tests expect.
        __A = self.prepare_config_and_inputs()
        __A , __A , __A = config_and_inputs
        __A = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Common-suite tests for the TF DeiT model classes.

    NOTE(review): mangled source — all test methods share the name
    ``UpperCamelCase_`` (later defs shadow earlier ones; no ``test_``
    prefix, so unittest discovers nothing), one override declares duplicate
    ``A`` parameters (a SyntaxError), and the base-class names were
    collapsed onto ``__SCREAMING_SNAKE_CASE``. Restore distinct names
    upstream.'''
    snake_case_ = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    snake_case_ = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    def UpperCamelCase_ ( self : str ):
        # setUp-style: build the model tester and a config tester.
        __A = TFDeiTModelTester(self )
        __A = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
    def UpperCamelCase_ ( self : Any ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds" )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass
    def UpperCamelCase_ ( self : List[Any] ):
        # Input embeddings are a keras layer; output embeddings Dense or None.
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
            __A = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A ,tf.keras.layers.Dense ) )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        # The forward signature must start with `pixel_values`.
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            __A = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __A = [*signature.parameters.keys()]
            __A = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,A )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*A )
    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )
    def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ,A : List[str] ,A : Optional[Any]=False ):
        # Drops `labels` for model classes whose call() takes none (teacher head).
        __A = super()._prepare_for_class(A ,A ,return_labels=A )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def UpperCamelCase_ ( self : Any ):
        # Smoke-test loading a pretrained checkpoint from the hub.
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = TFDeiTModel.from_pretrained(A )
            self.assertIsNotNone(A )
def UpperCAmelCase ( ):
    """Load the COCO cats fixture image used by the integration tests.

    Fix: the mangled original bound the image to a throwaway name and then
    returned the undefined name ``image``.
    """
    img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return img
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: run the pretrained distilled DeiT classifier on
    the COCO fixture image and compare a logits slice to reference values.

    NOTE(review): mangled source — locals collapse onto ``__A``, so names
    like ``model``/``outputs`` read below are unresolved; the test method
    lacks a ``test_`` prefix. Restore distinct names upstream.'''
    @cached_property
    def UpperCamelCase_ ( self : int ):
        # Image processor matching the pretrained checkpoint (None w/o vision).
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
            if is_vision_available()
            else None
        )
    @slow
    def UpperCamelCase_ ( self : Optional[int] ):
        __A = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
        __A = self.default_image_processor
        __A = prepare_img()
        __A = image_processor(images=A ,return_tensors="tf" )
        # forward pass
        __A = model(**A )
        # verify the logits
        __A = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape ,A )
        __A = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
    '''Slow integration test: encode a sentence with xlm-roberta-base under
    Flax and compare the last hidden state's shape and a slice of values.

    NOTE(review): mangled source — every local collapses onto ``A``, and the
    body then reads the distinct name ``A_`` (undefined) for the inputs;
    ``output``/``tokenizer``/``model`` are likewise unresolved. Restore
    distinct names upstream.'''
    @slow
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
        A = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        A = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        A = 'The dog is cute and lives in the garden house'
        A = jnp.array([tokenizer.encode(A_ )] )
        A = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
        A = jnp.array(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        A = model(A_ )['last_hidden_state']
        self.assertEqual(output.shape ,A_ )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] ,A_ ,atol=1e-3 ) )
| 91 |
# Doomsday date of each month, stored mod 7 (index 0 = January), e.g. the
# October doomsday is the 10th -> 10 % 7 == 3.
# Fix: the mangled original bound all three tables to one throwaway name,
# leaving the names the function reads undefined.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
def UpperCAmelCase ( a_ , b_ , c_ ) -> str:
    """Return the weekday name of the Gregorian date (a_=year, b_=month,
    c_=day) using Conway's Doomsday algorithm.

    Fixes vs. the mangled original: three parameters were all named ``a_``
    (a SyntaxError); all locals were collapsed onto one name; and the
    leap-year test for century years was inverted (``year % 400 == 0``
    instead of ``!= 0``), misclassifying e.g. 1900 and 2000.
    """
    year, month, day = a_, b_, c_
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    # weekday anchor of the century (e.g. 2 = Tuesday for the 2000s)
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # weekday of this year's doomsday ("odd + 11" style decomposition)
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # century years (centurian == 0) are common years unless divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 55 | 0 |
"""Find and print the articulation points of an undirected graph (DFS low-link)."""


def compute_ap(graph):
    """Print every articulation point of `graph`.

    `graph` is an adjacency list mapping vertex -> list of neighbours, with
    vertices labelled 0..n-1.  (Renamed from a mangled placeholder so the
    module-level `compute_ap(data)` call below resolves.)
    """
    n = len(graph)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # A DFS-tree root is an articulation point only when it owns more
        # than one subtree, so tree edges leaving the root are counted here.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # Root vertices are decided by subtree count, overriding any flag
            # the cycle rule may have set during the traversal.
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 92 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch one record from the Open Library JSON API.

    `olid` is an Open Library path such as ``isbn/0140328726``.
    Raises ValueError when the olid does not contain exactly one ``/``.
    (Both functions here had been mangled to a single shared name, so the
    second definition shadowed the first; the names used by the REPL loop
    below are restored.)
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = F'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg)
    return requests.get(F'''https://openlibrary.org/{new_olid}.json''').json()


def summarize_book(ol_book_data: dict) -> dict:
    """Map raw Open Library book data onto human-readable keys.

    Author entries are resolved through extra API calls and every list value
    is flattened into a comma-separated string.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# NOTE(review): this REPL loop sits at module level, not under the
# `__main__` guard above, so it executes on import — likely unintended.
# NOTE(review): the assignment targets look mangled: later lines read
# `isbn` and `book_summary`, which are never assigned here.
while True:
    # Prompt until the user enters a plausible ISBN or a quit keyword.
    SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
    if isbn.lower() in ("", "q", "quit", "exit", "stop"):
        break
    # Cheap shape check (10- or 13-digit) before hitting the network.
    if len(isbn) not in (10, 13) or not isbn.isdigit():
        print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
        continue
    print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
    try:
        SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
        print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
    except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
        print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: submodule contents are materialised only on first
# access.  The dict and the conditional additions below must share one name
# so the `_LazyModule(...)` call at the bottom can resolve it (the previous
# version bound them to throwaway names, leaving `_import_structure` undefined).
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only symbols are registered lazily as well.
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 |
import requests
# Replace with a personal Giphy API key (https://developers.giphy.com/).
# Renamed to match the default-argument reference below.
giphy_api_key = 'YOUR API KEY'


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return the URLs of GIFs matching `query` from the Giphy search API.

    (Restores the original names: the previous version left `giphy_api_key`
    and the `get_gifs` call below undefined, and both parameters even shared
    one mangled name — a SyntaxError.)
    """
    formatted_query = "+".join(query.split())
    url = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print('\n'.join(get_gifs('space ship')))
| 55 | 0 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved PyTorch state dict to FP16, overwriting in place by default.

    Loads `src_path`, halves every tensor, and writes the result to
    `save_path` (or back over `src_path` when no destination is given).
    Raises TypeError when the file is not a plain state dict of tensors.
    (Renamed from a mangled placeholder — `fire.Fire(convert)` below needs the
    name — the duplicated parameter names (a SyntaxError) are restored, and the
    halved tensor is stored back instead of being discarded.)
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
| 94 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True when `number` is prime (trial division over 6k±1 candidates).

    (The three callables in this snippet had all been mangled to one name, so
    each definition shadowed the previous one; the names referenced by the
    other two functions are restored.)
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def UpperCAmelCase(nth: int = 10001) -> int:
    """Return the `nth` prime, 1-indexed (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f'''{UpperCAmelCase() = }''')
| 55 | 0 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase_ (ProcessorMixin ):
    """
    Processor bundling an OwlViT-style image processor and a CLIP tokenizer
    behind one callable.

    NOTE(review): the base class was a mangled undefined name; `ProcessorMixin`
    (imported above and required for the `attributes`/`*_class` protocol used
    here) is restored.  The forwarding methods below had all been mangled to a
    single shared name, each definition shadowing the previous; conventional
    processor API names are restored.
    """

    # ProcessorMixin protocol: which sub-processors this class carries.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text`, preprocess `images`/`query_images`, and merge everything
        into one BatchEncoding.  At least one of the three inputs is required.
        (The previous version gave all five parameters the same mangled name —
        a SyntaxError — and referenced them through undefined placeholders.)
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 95 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch ``BertModel``'s weights as a TF1 checkpoint in `ckpt_dir`.

    (Both callables in this script had been mangled to one shared name and the
    three parameters to another; the names used by the keyword call below —
    ``model``, ``ckpt_dir``, ``model_name`` — are restored.)
    """
    # HF parameter names whose tensors are stored transposed on the TF side.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # PyTorch -> TF naming substitutions, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Translate a PyTorch parameter name into its TF counterpart.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F'''bert/{name}'''

    def create_tf_var(tensor, name, session):
        # Allocate a TF variable of matching dtype/shape, initialised to zeros.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a HF BERT checkpoint and convert it to TF."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure; the dict and the conditional additions below must
# share one name so the `_LazyModule(...)` call at the bottom resolves it
# (the previous version bound them to unrelated names, leaving
# `_import_structure` undefined).
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # NOTE(review): the module/class names here had been garbled
    # ("maskaformer"/"MaskaFormer"); restored to match the string table above.
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 96 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure shared with the `_LazyModule` call at the bottom;
# previously the dict and the late addition were bound to throwaway names,
# leaving `_import_structure` undefined.
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only symbols are registered lazily as well.
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 | 0 |
# Expose the VQ-Diffusion pipeline only when both transformers and torch are
# installed; otherwise this subpackage exports nothing.
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 97 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE :int = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def UpperCAmelCase ( a_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__A = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
__A = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
__A = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_: str):
    """Factory passed to argparse `set_defaults(func=...)` to build the command.

    (The factory and the class below had been mangled to unrelated names; the
    names each uses to refer to the other are restored.)
    """
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    """`diffusers-cli env`: collect and print environment/version information
    suitable for pasting into a GitHub issue."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Attach the `env` subcommand to the CLI parser.
        download_parser = parser.add_parser('''env''')
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        """Gather versions of the relevant libraries and print them; returns
        the collected info dict."""
        hub_version = huggingface_hub.__version__

        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }

        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render a dict as a `- key: value` bullet list (one item per line)."""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 98 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Tests for `BlipaProcessor` (tokenizer + image-processor composition).

    NOTE(review): every method had been mangled onto one shared name, so later
    definitions shadowed earlier ones and calls such as `self.get_tokenizer()`
    could never resolve; conventional names are restored.  `np.uinta` (an
    invalid dtype attribute) is fixed to `np.uint8`.
    """

    def setUp(self):
        # Save a small processor to a temp dir so tests can reload it.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = BlipaProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-first source array)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 55 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid="isbn/0140328726"):
    """Fetch one record from the Open Library JSON API.

    Raises ValueError when `olid` does not contain exactly one ``/``.
    (Both functions here had been mangled to the single name ``a`` — the
    second definition shadowed the first — so the names used by the calls
    below are restored.)
    """
    new_olid = olid.strip().strip("""/""")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("""/""") != 1:
        msg = f'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg)
    return requests.get(f'''https://openlibrary.org/{new_olid}.json''').json()


def summarize_book(ol_book_data):
    """Map raw Open Library book data onto human-readable keys; author records
    are resolved via extra API calls and list values are comma-joined."""
    desired_keys = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["""Authors"""] = [
        get_openlibrary_data(author["""key"""])["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = """, """.join(value)
    return data
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
SCREAMING_SNAKE_CASE = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 99 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset yielding fixed-length token tensors.

    Buffers raw examples from `dataset` until roughly
    ``seq_length * chars_per_token * num_of_sequences`` characters are
    collected, tokenizes the whole buffer, joins everything with the BOS token
    as separator, and yields only complete ``seq_length`` chunks.

    NOTE(review): renamed to the name `create_dataloader` below expects, the
    base class (a mangled undefined name) restored to ``IterableDataset``, and
    the duplicated ``__init__`` parameter names (a SyntaxError) restored.
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id  # separator between concatenated docs
        self.dataset = dataset
        self.seq_length = seq_length
        # Rough character budget gathered before each tokenization round.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            # NOTE(review): the previous version read a bare module-level
            # `tokenizer`; the tokenizer passed to the constructor is used.
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Emit only full windows; the ragged tail is dropped.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """Build a DataLoader over the streamed eval split wrapped in ConstantLengthDataset.

    Uses the module-level `tokenizer` loaded further down.  (Renamed from a
    mangled placeholder so the module-level call below resolves.)
    """
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    eval_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(eval_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    """Return (mean loss, perplexity) of the global `model` over `eval_dataloader`.

    Relies on the module-level `model`, `eval_dataloader` and `accelerator`
    prepared below.  Causal-LM loss uses the batch itself as labels.
    (Renamed from a mangled placeholder to match the call site below.)
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat the scalar loss so gather() weights each sample equally.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration (names restored: the previous version bound every
# result to the same mangled placeholder while later lines read `args`,
# `logger`, `model`, `eval_dataloader`, `eval_loss` and `perplexity`).
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 55 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure; the dict and the conditional additions below must
# share one name so the `_LazyModule(...)` call at the bottom can resolve it
# (the previous version bound them to unrelated names, leaving
# `_import_structure` undefined).
_import_structure = {
    """configuration_poolformer""": [
        """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """PoolFormerConfig""",
        """PoolFormerOnnxConfig""",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_poolformer"""] = ["""PoolFormerFeatureExtractor"""]
    _import_structure["""image_processing_poolformer"""] = ["""PoolFormerImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_poolformer"""] = [
        """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PoolFormerForImageClassification""",
        """PoolFormerModel""",
        """PoolFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 100 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for LayoutLM (WordPiece, BERT-style vocab).

    NOTE(review): the mixin base was a mangled undefined name, the class
    attributes all shared one placeholder name (each shadowing the previous),
    and so did the methods; the conventional `TokenizerTesterMixin` contract
    names — grounded by `self.tokenizer_class` / `self.vocab_file` references
    in the bodies below — are restored.
    """

    # TokenizerTesterMixin configuration.
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        # Write a tiny vocab file the tokenizer under test can load.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """Seq2seq-specific special-token behaviour is not applicable here."""
        pass
| 55 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and reusable model checks for the tests below."""

    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for a tiny randomly-initialized model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Force every sequence to end with EOS.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that decoding with past_key_values matches decoding from scratch."""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict a TF Blenderbot model expects, deriving any mask not supplied.

    Attention masks are 1 where the token is not padding; head masks default to all-ones
    (no heads disabled).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position (decoder_start_token) is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Backward-compatible alias for the obfuscated name this function previously carried.
a__ = prepare_blenderbot_inputs_dict
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for TF Blenderbot, driven by TFBlenderbotModelTester."""

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    """Slow end-to-end generation check against the real 400M distilled checkpoint."""

    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 101 |
# Precomputed fifth power of each decimal digit, keyed by the digit character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of *number*."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum all numbers equal to the sum of the fifth powers of
    their digits.

    7 digits give at most 7 * 9**5 = 413343 (< 1_000_000), so the search space
    [1000, 1_000_000) is sufficient; 1-3 digit numbers are excluded by definition.
    """
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 55 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
# Module-level logger used by handle_metrics() and main() below.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether tp freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log the metrics of one split and persist them to ``{output_dir}/{split}_results.json``.

    Args:
        split: one of "train", "val" or "test".
        metrics: metrics dict produced by the trainer for that split.
        output_dir: directory the JSON results file is written into.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f" {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    """Fine-tune, evaluate and/or predict with a seq2seq model.

    Parses ModelArguments / DataTrainingArguments / SeqaSeqTrainingArguments from the
    command line (or a single JSON file), builds model, tokenizer and datasets, and
    delegates the heavy lifting to SeqaSeqTrainer. Returns a dict of all metrics.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fpaa,  # NOTE(review): presumably the fp16 flag — confirm against SeqaSeqTrainingArguments
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    # Forward selected regularization overrides from the training args to the model config.
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    """Per-process entry point for xla_spawn (TPUs); *index* is the process index."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 102 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester that additionally checks MobileNetV1-specific config attributes."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV1 configs/inputs and reusable model checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The channel count of the final feature map scales with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for MobileNetV1.

    Several common tests are skipped because MobileNetV1 has no text inputs,
    no input/output embeddings and no attention outputs.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            # MobileNetV1 exposes 26 intermediate feature maps.
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end inference check against the real pretrained checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 55 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
    """Fast CPU tests for the ONNX Stable Diffusion img2img pipeline on a tiny checkpoint.

    Fix: the scrambled source assigned every local to ``_snake_case`` while later
    statements referenced the intended names (``pipe``, ``inputs``, ``image_slice``,
    ``expected_slice``), which raised NameError at runtime. Names are restored from
    those references; ``hub_checkpoint`` is grounded by ``self.hub_checkpoint`` below.
    """

    # Tiny checkpoint loaded by every test in this class.
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Return deterministic pipeline kwargs (input image, RNG, prompt) for ``seed``."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineNightlyTests(unittest.TestCase):
    """Nightly GPU integration tests for the ONNX Stable Diffusion img2img pipeline.

    Fixes: the class previously reused the name of the fast-test class above (shadowing
    it at module level) and collapsed every local into ``_snake_case`` while later code
    referenced ``options``, ``init_image``, ``pipe``, ``image_slice`` etc. (NameError).
    Property names are grounded by ``self.gpu_provider`` / ``self.gpu_options`` call sites.
    """

    @property
    def gpu_provider(self):
        """ONNX Runtime CUDA provider tuple with a bounded GPU arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): restored as the upstream `enable_mem_pattern = False`; confirm against the original test.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 103 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds tiny LayoutLMv3 configs/inputs and runs shape checks for each task head.

    Fixes: the scrambled source used duplicate ``A`` parameter names (a SyntaxError),
    assigned every attribute/local to ``__A`` (so ``self.batch_size``, ``bbox``, ``model``,
    ``result`` were never bound), and gave every method the same name. The class name and
    method names are grounded by the call sites in the test class below
    (``LayoutLMvaModelTester(self)``, ``prepare_config_and_inputs``, ``create_and_check_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return a config plus random text, bbox, image and label tensors."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1) by swapping out-of-order coords
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the layout the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model/pipeline tests for LayoutLMv3.

    Fixes: locals and attributes were all assigned to ``__A`` while later code referenced
    their intended names (NameError), every method shared one name (only the last survived),
    and the ``get_values``/``device``/``config_class`` arguments were the undefined ``A``.
    Mixin bases and mapping constants are grounded by the imports at the top of the file.
    """

    # NOTE(review): the scrambled source assigned three flags to a single name;
    # restored as the upstream trio — confirm against the original test file.
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the fixture COCO image used by the integration test.

    Fixes: the scrambled version assigned to a throwaway name and returned the
    undefined ``image`` (NameError); the function name is grounded by the
    ``prepare_img()`` call in the integration test, and the wrong ``-> Dict``
    annotation (it returns a PIL image) is removed.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test running the pretrained LayoutLMv3 base model on one image.

    Fixes: class renamed so it no longer shadows the common test class above; locals
    restored from their later references (``model``, ``pixel_values``, ``outputs``, ...);
    ``apply_ocr=A`` referenced an undefined name and is restored to ``False``
    (the test supplies its own words/boxes, so OCR must be off — confirm upstream).
    """

    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 55 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( _lowerCAmelCase ):
    """Deprecated alias of the YOLOS image processor, kept for backward compatibility.

    Fixes: the scrambled ``__init__`` declared ``*args`` and ``**kwargs`` under one
    duplicated name (a SyntaxError) and passed a bogus positional as the warning
    category; restored to the conventional deprecation shim.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit the standard deprecation warning before delegating to the new class.
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 104 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build ``BeitImageProcessor`` instances in tests.

    Fixes: the scrambled ``__init__`` reused one parameter name for every argument
    (a SyntaxError) and bound locals instead of ``self.*`` attributes, so the
    ``prepare_image_processor_dict`` reads below raised AttributeError. The class
    name is grounded by the ``BeitImageProcessingTester(self)`` call site.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used as ``BeitImageProcessor(**kwargs)``."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Return one ADE20k fixture image and its segmentation map as PIL images.

    Fixes: locals were assigned to a throwaway name while ``dataset``/``image``/``map``
    were referenced undefined; the function name is grounded by its call site in the
    segmentation-map tests, and the wrong ``-> int`` annotation is removed.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    seg_map = Image.open(dataset[1]["file"])

    return image, seg_map
def prepare_semantic_batch_inputs():
    """Return two ADE20k fixture images with their segmentation maps as PIL images.

    Fixes: previously shared its name with the single-input helper above (the second
    definition shadowed the first) and bound locals to a throwaway name while
    referencing ``ds``/``imagea``/``mapa`` undefined.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image_a = Image.open(ds[0]["file"])
    image_b = Image.open(ds[1]["file"])
    map_a = Image.open(ds[2]["file"])
    map_b = Image.open(ds[3]["file"])

    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for ``BeitImageProcessor`` across PIL/NumPy/PyTorch inputs and segmentation maps.

    Fixes: every local was assigned to ``__A`` while later statements referenced
    ``image_processing``/``image_inputs``/``encoded_images``/``encoding`` (NameError),
    method names were all identical (shadowing), and several keyword arguments were the
    undefined ``A``. Attribute names are grounded by the self-references in the original
    (``self.image_processing_class``, ``self.image_processor_tester``,
    ``self.image_processor_dict``); the mixin base comes from the file's imports.
    """

    # Only resolvable when vision deps are installed.
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # After reduce_labels the background (0) maps to 255, so the max rises accordingly.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 55 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# Fix: both module constants were assigned to a throwaway name while the code below
# reads `_has_safs` and `COMPRESSION_FILESYSTEMS` — restore the referenced names.
_has_safs = importlib.util.find_spec("s3fs") is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

# Built-in compressed-file filesystems registered with fsspec below.
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip a URI's protocol prefix, e.g. ``"s3://bucket/dir" -> "bucket/dir"``.

    Paths without a ``://`` separator are returned unchanged.

    Fixes: the split result was assigned to a throwaway local and the original
    argument returned unchanged; also renamed from the placeholder shared by four
    sibling functions (later defs shadowed earlier ones).
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True for any filesystem whose protocol is not the local ``"file"`` protocol.

    Fixes: renamed from the placeholder shared by four sibling functions — the
    ``rename`` helper below already calls ``is_remote_filesystem``, so this name
    is required for the module to work.
    """
    return fs is not None and fs.protocol != "file"
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Move ``src`` to ``dst`` on filesystem ``fs``.

    Fixes: ``is_local`` was assigned to a throwaway name and then read undefined
    (NameError); ``recursive=`` was passed the destination path instead of ``True``;
    renamed from the shared placeholder; wrong ``-> str`` annotation (nothing is
    returned) corrected.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Reset fsspec's asyncio state (loop thread, event loop, lock), e.g. after ``fork``.

    Fixes: the fallback branch assigned three throwaway locals, leaving fsspec's
    module state untouched; restored to mutate ``fsspec.asyn`` directly.
    NOTE(review): the fallback attribute names (`iothread`, `loop`, `lock`) are
    reconstructed from the upstream `datasets` source — confirm against it.
    """
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 105 |
from numpy import exp, pi, sqrt
def UpperCAmelCase(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the Gaussian (normal) probability density at ``x``.

    Fixes: the three parameters all shared the name ``a_`` (a SyntaxError) while the
    body referenced ``x``/``mu``/``sigma``; restored those names. The ``-> int``
    annotation was wrong — the density is a float.

    :param x: point (or numpy array of points) at which to evaluate the density
    :param mu: mean of the distribution
    :param sigma: standard deviation (must be non-zero)
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Fix: all five module constants were assigned to one placeholder name while the
# tokenizer class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — restore the referenced names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1024,
    'moussaKam/barthez': 1024,
    'moussaKam/barthez-orangesum-title': 1024,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '▁'
class lowerCAmelCase__ ( _lowerCamelCase ):
    """SentencePiece-backed BARThez tokenizer (BPE model).

    NOTE(review): reproduced verbatim apart from comments/docstrings. The
    dump renamed every method to ``__UpperCamelCase`` and reused that same
    name for multiple parameters in several signatures, so later defs shadow
    earlier ones and some defs are SyntaxErrors — flagged for repair, not
    silently rewritten here.
    """

    # Class-level tokenizer configuration consumed by the base tokenizer class.
    A_ : Union[str, Any] = VOCAB_FILES_NAMES
    A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    A_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ : str = ['input_ids', 'attention_mask']

    # Constructor: loads the SentencePiece model and sets up the fairseq-style
    # special-token id table. NOTE(review): assignment targets were obfuscated
    # to ``A``; attribute reads below (self.sp_model, self.vocab_file,
    # self.fairseq_tokens_to_ids) show what they were meant to bind.
    def __init__( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : List[str]="<s>" , __UpperCamelCase : Optional[int]="</s>" , __UpperCamelCase : Any="</s>" , __UpperCamelCase : int="<s>" , __UpperCamelCase : Optional[Any]="<unk>" , __UpperCamelCase : Dict="<pad>" , __UpperCamelCase : str="<mask>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : Optional[Any] , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        A = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
        A = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
        # Load the SentencePiece model from the provided vocab file.
        A = vocab_file
        A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__UpperCamelCase ) )
        # fairseq reserves the first ids for its special tokens.
        A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        A = len(self.sp_model ) - 1
        A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    # Build model inputs: <s> seq </s> for a single sequence,
    # <s> a </s></s> b </s> for a pair.
    def __UpperCamelCase ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A = [self.cls_token_id]
        A = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # 1/0 mask marking which positions hold special tokens.
    def __UpperCamelCase ( self : Any , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCamelCase )) + [1]
        return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]

    # Token-type ids: all zeros for this model (both single and pair inputs).
    def __UpperCamelCase ( self : List[str] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
        A = [self.sep_token_id]
        A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    # Vocabulary size, i.e. number of pieces in the SentencePiece model.
    @property
    def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
        return len(self.sp_model )

    # Full token -> id mapping, including user-added tokens.
    def __UpperCamelCase ( self : Tuple ) -> str:
        A = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # Tokenize raw text into SentencePiece string pieces.
    def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> List[str]:
        return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )

    # Token -> id, with the fairseq special-token table taking precedence;
    # unknown pieces (id 0) fall back to the unk token id.
    def __UpperCamelCase ( self : Dict , __UpperCamelCase : Any ) -> Dict:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        A = self.sp_model.PieceToId(__UpperCamelCase )
        return spm_id if spm_id else self.unk_token_id

    # Id -> token, again preferring the fairseq special-token table.
    def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : int ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(__UpperCamelCase )

    # Join tokens back into a string; special tokens are emitted literally and
    # only the sub-token runs between them go through SentencePiece decoding.
    def __UpperCamelCase ( self : Any , __UpperCamelCase : Optional[int] ) -> Optional[int]:
        A = []
        A = ''
        A = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__UpperCamelCase ) + token
                A = True
                A = []
            else:
                current_sub_tokens.append(__UpperCamelCase )
                A = False
        out_string += self.sp_model.decode(__UpperCamelCase )
        return out_string.strip()

    # Pickling support: the SentencePieceProcessor is not picklable, so it is
    # dropped here and rebuilt in __setstate__.
    def __getstate__( self : Any ) -> int:
        A = self.__dict__.copy()
        A = None
        return state

    def __setstate__( self : Any , __UpperCamelCase : Dict ) -> int:
        A = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            A = {}
        # Rebuild the SentencePiece processor dropped by __getstate__.
        A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    # Save the SentencePiece model into `save_directory`; copies the original
    # file when it exists on disk, otherwise serializes the loaded model.
    def __UpperCamelCase ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__UpperCamelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A = os.path.join(
            __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCamelCase , 'wb' ) as fi:
                A = self.sp_model.serialized_model_proto()
                fi.write(__UpperCamelCase )
        return (out_vocab_file,)
| 106 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    """Slow end-to-end test for the Flax Stable Diffusion 2 inpainting pipeline.

    NOTE(review): reproduced verbatim apart from comments. The dump renamed
    every assignment target to ``__A`` (and call arguments to ``A``), so the
    locals read below (num_samples, prompt, pipeline, ...) are never actually
    bound — flagged for repair, not silently rewritten here.
    """

    # NOTE(review): obfuscated method name, but clearly intended as tearDown:
    # it calls super().tearDown() and forces a GC pass between tests.
    def UpperCamelCase_ ( self : Tuple ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    # Downloads the init/mask images and the inpainting checkpoint, runs 50
    # denoising steps sharded across all local devices, and compares a 3x3
    # slice of the output against hard-coded reference values.
    def UpperCamelCase_ ( self : Optional[int] ):
        __A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __A = "xvjiarui/stable-diffusion-2-inpainting"
        __A , __A = FlaxStableDiffusionInpaintPipeline.from_pretrained(A ,safety_checker=A )
        __A = "Face of a yellow cat, high resolution, sitting on a park bench"
        __A = jax.random.PRNGKey(0 )
        __A = 50
        __A = jax.device_count()
        # Replicate prompt/image/mask once per device.
        __A = num_samples * [prompt]
        __A = num_samples * [init_image]
        __A = num_samples * [mask_image]
        __A , __A , __A = pipeline.prepare_inputs(A ,A ,A )
        # shard inputs and rng
        __A = replicate(A )
        __A = jax.random.split(A ,jax.device_count() )
        __A = shard(A )
        __A = shard(A )
        __A = shard(A )
        __A = pipeline(
            A ,A ,A ,A ,A ,A ,jit=A )
        __A = output.images.reshape(A ,5_12 ,5_12 ,3 )
        # Fixed 3x3 window of the last channel, compared against references.
        __A = images[0, 2_53:2_56, 2_53:2_56, -1]
        __A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __A = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 55 | 0 |
"""Sanity-check the repository's file paths.

Flags file paths that contain uppercase letters, spaces or hyphens, or that
sit directly at the repository root, and exits with a non-zero status (the
offending count) when any are found.

Restored from an obfuscated dump: every assignment target had been renamed to
``_UpperCAmelCase``, so the names referenced below (``filepaths``,
``upper_files``, ...) were undefined at runtime.
"""
import os
import sys

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# All candidate file paths; an empty result means the helper itself is broken.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Paths with uppercase characters (convention: all lowercase).
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

# Paths containing spaces.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

# Paths containing hyphens (underscores are expected instead).
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

# Paths with no directory separator, i.e. files at the repository root.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit non-zero with the offending count when any check failed.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    sys.exit(bad_files)
| 107 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
    """Configuration holder used to build a ``GLPNImageProcessor`` in tests.

    Restored from an obfuscated dump: the constructor's parameters all shared
    one name and every assignment overwrote a single throwaway local, so
    nothing was ever stored on the instance.  Parameter names/order are taken
    from the original assignment order and the attributes read by
    ``prepare_image_processor_dict``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }

    # Backward-compatible alias for the obfuscated method name.
    UpperCamelCase_ = prepare_image_processor_dict
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for ``GLPNImageProcessor``.

    NOTE(review): reproduced verbatim apart from comments. The dump left the
    mixin base as the undefined name ``__SCREAMING_SNAKE_CASE``, renamed all
    methods to ``UpperCamelCase_`` (so later defs shadow earlier ones) and
    all assignment targets to ``__A`` — flagged for repair, not rewritten.
    """

    # Processor class under test (None when vision deps are unavailable).
    snake_case_ = GLPNImageProcessor if is_vision_available() else None

    # Intended as setUp: builds the tester that supplies processor kwargs.
    def UpperCamelCase_ ( self : int ):
        __A = GLPNImageProcessingTester(self )

    # Processor kwargs delegated to the tester.
    @property
    def UpperCamelCase_ ( self : Optional[Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor must expose its documented configuration attributes.
    def UpperCamelCase_ ( self : Any ):
        __A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A ,"do_resize" ) )
        self.assertTrue(hasattr(A ,"size_divisor" ) )
        self.assertTrue(hasattr(A ,"resample" ) )
        self.assertTrue(hasattr(A ,"do_rescale" ) )

    # Intentionally empty placeholder (kept from the original).
    def UpperCamelCase_ ( self : str ):
        pass

    # PIL inputs: output spatial dims must be multiples of size_divisor.
    def UpperCamelCase_ ( self : Dict ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A ,Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    # Same check with numpy-array inputs.
    def UpperCamelCase_ ( self : Optional[Any] ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    # Same check with torch-tensor inputs.
    def UpperCamelCase_ ( self : int ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 55 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Configuration holder used to build a ``LevitImageProcessor`` in tests.

    Restored from an obfuscated dump: the constructor's parameters all shared
    one name and every assignment overwrote a single throwaway local.
    Parameter names/order are recovered from the right-hand sides of the
    original assignments and the attributes read by
    ``prepare_image_processor_dict``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 — kept for interface parity; never mutated
        image_std=[0.5, 0.5, 0.5],  # noqa: B006
    ):
        # Fall back to the original default geometry when not provided.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }

    # Backward-compatible alias for the obfuscated method name.
    lowerCamelCase = prepare_image_processor_dict
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , unittest.TestCase ):
    """Tests for ``LevitImageProcessor``.

    NOTE(review): reproduced verbatim apart from comments/docstrings. The
    dump renamed every method to ``lowerCamelCase`` (so later defs shadow
    earlier ones) and every assignment target to ``_UpperCAmelCase`` —
    flagged for repair, not silently rewritten here.
    """

    # Processor class under test (None when vision deps are unavailable).
    _lowerCamelCase = LevitImageProcessor if is_vision_available() else None

    # Intended as setUp: builds the tester that supplies processor kwargs.
    def lowerCamelCase ( self : Dict ) -> List[str]:
        """Create the image-processing tester fixture."""
        _UpperCAmelCase = LevitImageProcessingTester(self )

    @property
    def lowerCamelCase ( self : int ) -> Dict:
        """Processor kwargs delegated to the tester."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCamelCase ( self : int ) -> Tuple:
        """The processor must expose its documented configuration attributes."""
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase , """image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """image_std""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """size""" ) )

    def lowerCamelCase ( self : Tuple ) -> str:
        """from_dict must honor defaults and size/crop_size overrides."""
        _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )

    def lowerCamelCase ( self : str ) -> int:
        """Intentionally empty placeholder (kept from the original)."""
        pass

    def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
        """PIL inputs: single image and batch must come out center-cropped."""
        # Initialize image_processing
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , Image.Image )
        # Test not batched input
        _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _UpperCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def lowerCamelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Same shape checks with numpy-array inputs."""
        # Initialize image_processing
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , np.ndarray )
        # Test not batched input
        _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _UpperCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def lowerCamelCase ( self : Optional[int] ) -> Any:
        """Same shape checks with torch-tensor inputs."""
        # Initialize image_processing
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , torch.Tensor )
        # Test not batched input
        _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _UpperCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 108 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase ( TaskTemplate ):
    """Task template for ``image-classification`` datasets.

    NOTE(review): restored from an obfuscated dump — the decorator argument,
    base class and every field name were bound to undefined placeholder names.
    Field names are recovered from the attributes the methods below read
    (``self.label_column``, ``self.label_schema``, ``self.image_column``);
    the base class from the ``TaskTemplate`` import at the top of the file.
    """

    # Task identifier; kept in asdict() output even when left at its default.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    # Expected input/label schemas (class-level, not dataclass fields).
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    # Dataset column names this template maps onto the schemas above.
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises ValueError when the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Bypass the frozen dataclass to install the aligned schema on the copy.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    # Backward-compatible alias for the obfuscated method name.
    UpperCamelCase_ = align_with_features

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the task's canonical column names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 55 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __a ( unittest.TestCase ):
    """Minimal configuration holder for ``MarkupLMFeatureExtractor`` tests.

    Restored from an obfuscated dump: the constructor assigned ``parent`` to
    a throwaway local instead of the instance.  The ``prepare_feat_extract_dict``
    name matches the call made by the feature-extraction test class below.
    """

    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        """MarkupLM's feature extractor takes no constructor kwargs."""
        return {}

    # Backward-compatible alias for the obfuscated method name.
    UpperCAmelCase__ = prepare_feat_extract_dict
def get_html_strings() -> list:
    """Return two distinct sample HTML documents for feature-extraction tests.

    Restored from an obfuscated dump: both locals shared one placeholder name,
    so the same string was returned twice; the batched test below expects two
    *different* documents, and its call sites use the name ``get_html_strings``.
    """
    html_string_1 = """<HTML>
    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>
    <BODY BGCOLOR=\"FFFFFF\">
    <HR>
    <a href=\"http://google.com\">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style=\"color:#0000FF\">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]


# Backward-compatible alias for the obfuscated function name.
__magic_name__ = get_html_strings
@require_bsa
class __a ( _snake_case, unittest.TestCase ):
    """Tests for ``MarkupLMFeatureExtractor`` (HTML -> nodes + xpaths).

    NOTE(review): reproduced verbatim apart from comments. The dump renamed
    every method to ``UpperCAmelCase__`` (so later defs shadow earlier ones)
    and every assignment target to ``__SCREAMING_SNAKE_CASE`` — flagged for
    repair, not silently rewritten here.
    """

    # Extractor class under test (None when BeautifulSoup is unavailable).
    __UpperCamelCase : Any = MarkupLMFeatureExtractor if is_bsa_available() else None

    # Intended as setUp: builds the tester fixture.
    def UpperCAmelCase__ ( self : Optional[int] ):
        '''Create the feature-extraction tester fixture.'''
        __SCREAMING_SNAKE_CASE = MarkupLMFeatureExtractionTester(self )

    @property
    def UpperCAmelCase__ ( self : Dict ):
        '''Extractor kwargs delegated to the tester.'''
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def UpperCAmelCase__ ( self : Tuple ):
        '''Single-document and batched extraction must match reference nodes/xpaths.'''
        __SCREAMING_SNAKE_CASE = self.feature_extraction_class()
        # Test not batched input
        __SCREAMING_SNAKE_CASE = get_html_strings()[0]
        __SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase )
        # fmt: off
        __SCREAMING_SNAKE_CASE = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
        __SCREAMING_SNAKE_CASE = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
        # fmt: on
        self.assertEqual(encoding.nodes ,lowerCamelCase )
        self.assertEqual(encoding.xpaths ,lowerCamelCase )
        # Test batched
        __SCREAMING_SNAKE_CASE = get_html_strings()
        __SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase )
        # fmt: off
        __SCREAMING_SNAKE_CASE = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
        __SCREAMING_SNAKE_CASE = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
        self.assertEqual(len(encoding.nodes ) ,2 )
        self.assertEqual(len(encoding.xpaths ) ,2 )
        self.assertEqual(encoding.nodes ,lowerCamelCase )
        self.assertEqual(encoding.xpaths ,lowerCamelCase )
| 109 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test for a non-negative int.

    Restored from an obfuscated dump: parameter and flag had been renamed to
    placeholders while the body still read ``number``/``status``; the name
    ``is_prime`` is the one every sibling function in this module calls.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: primes from 2 up to and including ``n``.

    Restored from an obfuscated dump; the canonical name ``sieve_er`` is
    assumed (not called elsewhere in this chunk — confirm against callers).
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Primes from 2 up to and including ``n`` via repeated primality tests.

    Restored from an obfuscated dump; the name ``get_prime_numbers`` is the
    one the goldbach function below calls.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Prime factorization of ``number`` (with multiplicity), ascending.

    Restored from an obfuscated dump; the name ``prime_factorization`` is the
    one the greatest/smallest-prime-factor and lcm functions below call.
    0 and 1 are returned as themselves (kept from the original behaviour).
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of ``number`` (restored from an obfuscated dump)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of ``number`` (restored from an obfuscated dump)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """True when ``number`` is even (name grounded by the goldbach precondition)."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """True when ``number`` is odd (restored from an obfuscated dump)."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes whose sum is the even ``number`` (Goldbach pair).

    Restored from an obfuscated dump: all locals shared one placeholder name;
    the names below (``prime_numbers``, ``len_pn``, ``loop``) are the ones
    the unrenamed expressions in the body still referenced.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number_1: int, number_2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm.

    Restored from an obfuscated dump; the name ``gcd`` is the one
    simplify_fraction below calls, and the two-number parameter names follow
    the original assertion messages.
    """
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1, int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1
def kg_v(number_1: int, number_2: int) -> int:
    """Least common multiple (kgV) of two positive ints via prime factorizations.

    Restored from an obfuscated dump: locals recovered from the unrenamed
    expressions in the body (``prime_fac_a.count``, ``done.append``, ``ans``).
    """
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                # shared factor: take it with its greater multiplicity
                for _ in range(max(count_1, count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime (0-indexed: get_prime(0) == 2).

    Restored from an obfuscated dump; locals recovered from the body's
    unrenamed reads of ``index`` and ``ans``.
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """All primes strictly between two primes ``p_number_1 < p_number_2``.

    Restored from an obfuscated dump; locals recovered from the body's
    unrenamed reads of ``number`` and ``ans``.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """All divisors of ``n`` (including 1 and n), ascending.

    Restored from an obfuscated dump; the name ``get_divisors`` is the one
    is_perfect_number below calls.
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """True when ``number`` equals the sum of its proper divisors (e.g. 6, 28).

    Restored from an obfuscated dump; the ``divisors`` local is grounded by
    the precondition assert that reads it.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator by their gcd; returns the reduced pair.

    Restored from an obfuscated dump; parameter and local names are grounded
    by the unrenamed reads in the body (``numerator``, ``denominator``,
    ``gcd_of_fraction``).
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Iterative n! for n >= 0 (restored from an obfuscated dump)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """n-th Fibonacci number with fib(0) == fib(1) == 1 (original convention).

    Restored from an obfuscated dump; locals recovered from the unrenamed
    reads (``ans``, ``fib_1`` as the trailing term, ``tmp`` as the swap).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
| 55 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute pixel box [x0, y0, x1, y1] to LayoutLM's 0-1000 range.

    Restored from an obfuscated dump: every parameter had been renamed to
    ``_snake_case`` while the body read ``box``/``width``/``height``; the
    function name matches the call site in the OCR helper below.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on an image; return (words, normalized_boxes).

    Restored from an obfuscated dump: parameters and locals all shared
    placeholder names; they are recovered from the right-hand sides and the
    keys read out of pytesseract's dict output.  The function name follows
    the transformers convention for this helper — confirm against callers.
    """
    # Convert the incoming array/tensor to a PIL image for OCR.
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes to the 0-1000 range
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a ( lowercase ):
    """Document image processor: resize / rescale / normalize pixel values and
    optionally run Tesseract OCR to produce words + normalized boxes.

    NOTE(review): this block is machine-mangled — every ``def`` below repeats a
    single parameter name (``UpperCamelCase_``), which is a SyntaxError as
    written; ``__init__`` binds results to a local instead of ``self``
    attributes; and all four methods share the name ``__snake_case`` so only
    the last definition would survive on the class. Distinct names must be
    restored before this can run.
    """
    # Key emitted in the BatchFeature produced at the end of preprocessing.
    UpperCamelCase : List[Any] = ["""pixel_values"""]
    # Records the default resize/rescale/normalize/OCR settings.
    def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 255 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = "" , **UpperCamelCase_ , ):
        super().__init__(**UpperCamelCase_ )
        # Default target size is 224x224 when the caller supplies none.
        UpperCAmelCase__ : Tuple = size if size is not None else {'height': 224, 'width': 224}
        UpperCAmelCase__ : List[Any] = get_size_dict(UpperCamelCase_ )
        UpperCAmelCase__ : Any = do_resize
        UpperCAmelCase__ : Optional[Any] = size
        UpperCAmelCase__ : Optional[int] = resample
        UpperCAmelCase__ : List[str] = do_rescale
        UpperCAmelCase__ : Union[str, Any] = rescale_value
        UpperCAmelCase__ : List[str] = do_normalize
        # Fall back to ImageNet statistics when no mean/std is given.
        UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
        UpperCAmelCase__ : Optional[Any] = apply_ocr
        UpperCAmelCase__ : Optional[Any] = ocr_lang
        UpperCAmelCase__ : Tuple = tesseract_config
    # Resize an image to the {'height', 'width'} given in ``size``.
    def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        UpperCAmelCase__ : Optional[Any] = get_size_dict(UpperCamelCase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        UpperCAmelCase__ : List[Any] = (size['height'], size['width'])
        return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
    # Multiply pixel values by ``scale`` (e.g. 1/255).
    def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
    # Normalize with per-channel mean/std.
    def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
    # Full pipeline: validate, optional OCR, then resize -> rescale -> normalize
    # -> channel-first, and package everything as a BatchFeature.
    def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
        # Per-call overrides fall back to the instance defaults.
        UpperCAmelCase__ : List[Any] = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase__ : List[str] = size if size is not None else self.size
        UpperCAmelCase__ : Optional[Any] = get_size_dict(UpperCamelCase_ )
        UpperCAmelCase__ : Any = resample if resample is not None else self.resample
        UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase__ : List[Any] = image_std if image_std is not None else self.image_std
        UpperCAmelCase__ : str = apply_ocr if apply_ocr is not None else self.apply_ocr
        UpperCAmelCase__ : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
        UpperCAmelCase__ : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config
        UpperCAmelCase__ : Any = make_list_of_images(UpperCamelCase_ )
        if not valid_images(UpperCamelCase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
        # All transformations expect numpy arrays.
        UpperCAmelCase__ : Optional[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            UpperCAmelCase__ : Any = []
            UpperCAmelCase__ : str = []
            for image in images:
                UpperCAmelCase__ , UpperCAmelCase__ : Tuple = apply_tesseract(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
                words_batch.append(UpperCamelCase_ )
                boxes_batch.append(UpperCamelCase_ )
        if do_resize:
            UpperCAmelCase__ : Optional[int] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
        if do_rescale:
            UpperCAmelCase__ : int = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
        if do_normalize:
            UpperCAmelCase__ : Optional[Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
        UpperCAmelCase__ : List[str] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
        UpperCAmelCase__ : int = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCamelCase_ )
        # OCR results are attached alongside the pixel values when requested.
        if apply_ocr:
            UpperCAmelCase__ : List[Any] = words_batch
            UpperCAmelCase__ : Any = boxes_batch
        return data
| 110 |
import os
def UpperCAmelCase(triangle_path=None) -> int:
    """Project Euler 18/67: maximum top-to-bottom path sum in a number triangle.

    Reads ``triangle.txt`` next to this script unless *triangle_path* is given
    (new optional parameter; default behavior unchanged).
    (Fix: the original called ``os.path.realpath(a_)`` with ``a_`` undefined —
    should be ``__file__`` — and appended to locals the mangler renamed away.)
    """
    if triangle_path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    # Parse each line into a row of ints.
    a = [[int(number) for number in line.strip().split(" ")] for line in triangle]
    # Dynamic programming: each cell accumulates the best path reaching it.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            above = a[i - 1][j] if j != len(a[i - 1]) else 0
            above_left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(above, above_left)
    return max(a[-1])


if __name__ == "__main__":
    print(UpperCAmelCase())
| 55 | 0 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _SCREAMING_SNAKE_CASE :
    """Histogram-stretch a grayscale image (OpenCV ``cva`` + matplotlib).

    NOTE(review): machine-mangled source — ``__init__`` binds every value to the
    local ``lowercase__`` instead of ``self`` attributes, yet the methods read
    ``self.img``, ``self.k``, ``self.sk``, ``self.L``, ``self.rem``,
    ``self.last_list``, ``self.number_of_rows/cols`` and
    ``self.original_image``; all three methods also share the name
    ``UpperCAmelCase__``, so only the last survives. Restore distinct names.
    """
    def __init__( self ) -> int:
        # Intended attribute initialisation (img, original_image, last_list,
        # rem, L=256 gray levels, sk, k, number_of_rows, number_of_cols) —
        # currently lost to the local rebinding noted above.
        lowercase__ : Dict = """"""
        lowercase__ : Dict = """"""
        lowercase__ : Tuple = []
        lowercase__ : Any = 0
        lowercase__ : Dict = 256
        lowercase__ : Tuple = 0
        lowercase__ : Optional[int] = 0
        lowercase__ : List[Any] = 0
        lowercase__ : str = 0
    # Compute the stretched gray-level mapping and write output_data/output.jpg.
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
        lowercase__ : Tuple = cva.imread(lowerCamelCase__ , 0 )
        lowercase__ : Union[str, Any] = copy.deepcopy(self.img )
        # Histogram of the raw pixel values over the 256 gray levels.
        lowercase__ , lowercase__ , lowercase__ : int = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
        lowercase__ : Optional[Any] = np.sum(lowerCamelCase__ )
        for i in range(len(lowerCamelCase__ ) ):
            # Accumulate the normalized histogram (CDF) and map it to [0, L-1].
            lowercase__ : int = x[i] / self.k
            self.sk += prk
            lowercase__ : Optional[int] = (self.L - 1) * self.sk
            if self.rem != 0:
                lowercase__ : Union[str, Any] = int(last % last )
            lowercase__ : Tuple = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(lowerCamelCase__ )
        lowercase__ : Dict = int(np.ma.count(self.img ) / self.img[1].size )
        lowercase__ : List[str] = self.img[1].size
        # Remap every pixel through the lookup table built above.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                lowercase__ : int = self.img[j][i]
                if num != self.last_list[num]:
                    lowercase__ : Dict = self.last_list[num]
        cva.imwrite("""output_data/output.jpg""" , self.img )
    # Plot the histogram of the (stretched) image.
    def UpperCAmelCase__( self ) -> Any:
        plt.hist(self.img.ravel() , 256 , [0, 256] )
    # Display input and output images in OpenCV windows for 5 seconds.
    def UpperCAmelCase__( self ) -> str:
        cva.imshow("""Output-Image""" , self.img )
        cva.imshow("""Input-Image""" , self.original_image )
        cva.waitKey(5000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # Fix: the original bound both values to `__snake_case`, then read the
    # undefined names `file_path`/`stretcher`, and instantiated the undefined
    # `ConstantStretch` instead of the class actually defined above.
    # NOTE(review): upstream builds the path from os.path.basename(__file__);
    # kept as-is, though dirname would be the usual choice — confirm intent.
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = _SCREAMING_SNAKE_CASE()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 200 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE :Union[str, Any] = object()
# For specifying empty leaf dict `{}`
# NOTE(review): this rebinding clobbers the sentinel above — both assignments
# use the same (mangled) name, so only the second object survives, while the
# helpers below expect two distinct sentinels (`_unmatched` and the empty-dict
# marker).
SCREAMING_SNAKE_CASE :List[str] = object()
def UpperCAmelCase(qs, ks) -> bool:
    """Return True if the regex tuple *qs* matches a contiguous window of *ks*.

    Each pattern in *qs* is anchored with ``$`` and matched against the key at
    the corresponding offset.
    (Fix: the original declared both parameters as ``a_`` — a SyntaxError —
    which also collapsed ``range(len(ks) - len(qs) + 1)`` and ``x.match(y)``.)
    """
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def UpperCAmelCase(rules):
    """Build a ``replace(key, val)`` closure over *rules*.

    ``replace`` returns the replacement of the first rule whose pattern matches
    *key* (via ``_match``), else *val* unchanged.
    (Fix: the original's inner def declared both parameters as ``a_`` — a
    SyntaxError — and called ``_match(a_, a_)`` with the loop variables lost.)
    """
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def UpperCAmelCase():
    """Regex-path -> PartitionSpec rules for a GPT-2-style Flax model.

    ``"mp"`` names the model-parallel mesh axis; ``None`` leaves an axis
    unsharded.
    (Fix: the original wrote ``P("mp", a_)`` etc. with ``a_`` undefined at
    module scope — restored to ``None`` per the upstream partitioning rules.)
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def UpperCAmelCase(in_dict):
    """Map every leaf of *in_dict* to a PartitionSpec using the module's rules.

    Asserts that every parameter path matched some rule before freezing the
    result.
    (Fix: the original bound each step to ``__A`` but then read the undefined
    names ``initd``/``result``, and called ``replace(a_, a_)`` instead of
    ``replace(k, v)``.)
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 55 | 0 |
import os
def lowerCamelCase_(triangle_path=None) -> int:
    """Project Euler 18/67: maximum top-to-bottom path sum in a number triangle.

    Reads ``triangle.txt`` next to this script unless *triangle_path* is
    supplied (new optional parameter; default behavior unchanged).
    (Fix: the original called ``os.path.realpath(a_)`` with ``a_`` undefined —
    should be ``__file__`` — and appended to locals the mangler renamed away.)
    """
    if triangle_path is None:
        triangle_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '''triangle.txt''')
    with open(triangle_path) as f:
        rows = [[int(tok) for tok in line.strip().split(''' ''')] for line in f.readlines()]
    # Bottom-up accumulation: each cell adds the best of its two parents.
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            from_above = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            from_diagonal = rows[i - 1][j - 1] if j > 0 else 0
            rows[i][j] += max(from_above, from_diagonal)
    return max(rows[-1])


if __name__ == "__main__":
    print(lowerCamelCase_())
| 60 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase :
    """Helper that builds a small DeiT config plus inputs for the TF model tests.

    NOTE(review): machine-mangled source — ``__init__`` repeats the single
    parameter name ``A`` twenty times (a SyntaxError as written) and binds every
    value to the local ``__A`` instead of ``self`` attributes, although the
    other methods read ``self.batch_size``, ``self.image_size`` etc. Distinct
    names must be restored before this can run.
    """
    def __init__( self : List[Any] ,A : Union[str, Any] ,A : List[Any]=13 ,A : Optional[Any]=30 ,A : Union[str, Any]=2 ,A : Union[str, Any]=3 ,A : Any=True ,A : Dict=True ,A : str=32 ,A : Tuple=2 ,A : Optional[int]=4 ,A : Tuple=37 ,A : List[Any]="gelu" ,A : Dict=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=10 ,A : Optional[Any]=0.02 ,A : Dict=3 ,A : Dict=None ,A : List[Any]=2 ,):
        # Intended: record all hyper-parameters on self (parent, batch_size,
        # image_size, patch_size, ... encoder_stride) — lost to the mangling.
        __A = parent
        __A = batch_size
        __A = image_size
        __A = patch_size
        __A = num_channels
        __A = is_training
        __A = use_labels
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = type_sequence_label_size
        __A = initializer_range
        __A = scope
        __A = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        __A = (image_size // patch_size) ** 2
        __A = num_patches + 2
    # Build random pixel values (and labels when enabled) plus a config.
    def UpperCamelCase_ ( self : List[Any] ):
        __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        __A = self.get_config()
        return config, pixel_values, labels
    # Assemble a DeiTConfig from the recorded hyper-parameters.
    def UpperCamelCase_ ( self : Optional[int] ):
        return DeiTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
    # Forward the base model and check the hidden-state shape.
    def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,A : Optional[int] ,A : Union[str, Any] ):
        __A = TFDeiTModel(config=A )
        __A = model(A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    # Check masked-image-modeling reconstruction shape (RGB and greyscale).
    def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : Dict ):
        __A = TFDeiTForMaskedImageModeling(config=A )
        __A = model(A )
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __A = 1
        __A = TFDeiTForMaskedImageModeling(A )
        __A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __A = model(A )
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
    # Check classification logits shape (RGB and greyscale).
    def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Dict ,A : Union[str, Any] ):
        __A = self.type_sequence_label_size
        __A = TFDeiTForImageClassification(A )
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __A = 1
        __A = TFDeiTForImageClassification(A )
        __A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    # Produce (config, inputs_dict) for the common test mixin.
    def UpperCamelCase_ ( self : str ):
        __A = self.prepare_config_and_inputs()
        __A , __A , __A = config_and_inputs
        __A = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """TF DeiT model test-suite (common model tests + pipeline tests).

    NOTE(review): machine-mangled source — the six class attributes below all
    share the name ``snake_case_`` (only the last binding survives; upstream
    these are ``all_model_classes``, ``pipeline_model_mapping`` and four test
    flags), and method bodies reference ``A`` which is never a distinct
    parameter. Distinct names must be restored before this can run.
    """
    snake_case_ = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    snake_case_ = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    # Set up the model tester and the config tester.
    def UpperCamelCase_ ( self : str ):
        __A = TFDeiTModelTester(self )
        __A = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
    def UpperCamelCase_ ( self : Any ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds" )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass
    # Input embeddings should be a Keras layer; output embeddings a Dense or None.
    def UpperCamelCase_ ( self : List[Any] ):
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
            __A = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A ,tf.keras.layers.Dense ) )
    # The call signature should start with `pixel_values`.
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            __A = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __A = [*signature.parameters.keys()]
            __A = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,A )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*A )
    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )
    # Drop labels for model classes whose call() does not accept them.
    def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ,A : List[str] ,A : Optional[Any]=False ):
        __A = super()._prepare_for_class(A ,A ,return_labels=A )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def UpperCamelCase_ ( self : Any ):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = TFDeiTModel.from_pretrained(A )
            self.assertIsNotNone(A )
def UpperCAmelCase():
    """Load the COCO test-fixture image used by the integration test below.

    (Fix: the original assigned the image to ``__A`` and then returned the
    undefined name ``image``; the ``-> str`` annotation was also wrong — the
    function returns a PIL image object.)
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test: TF DeiT distilled checkpoint on a real image.

    NOTE(review): machine-mangled source — results are bound to the local
    ``__A`` while later lines read ``outputs``/``expected_shape`` style names
    via ``A``; distinct names must be restored before this can run.
    """
    @cached_property
    def UpperCamelCase_ ( self : int ):
        # Image processor matching the checkpoint, or None without vision deps.
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
            if is_vision_available()
            else None
        )
    @slow
    def UpperCamelCase_ ( self : Optional[int] ):
        __A = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
        __A = self.default_image_processor
        __A = prepare_img()
        __A = image_processor(images=A ,return_tensors="tf" )
        # forward pass
        __A = model(**A )
        # verify the logits
        __A = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape ,A )
        __A = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Configuration for the VAN (Visual Attention Network) model.

    NOTE(review): machine-mangled source — every ``__init__`` parameter shares
    the name ``a_`` (a SyntaxError as written) and all values are bound to the
    local ``a__`` instead of ``self`` attributes (image_size, num_channels,
    patch_sizes, strides, hidden_sizes, depths, mlp_ratios, hidden_act,
    initializer_range, layer_norm_eps, layer_scale_init_value, drop_path_rate,
    dropout_rate). Distinct names must be restored before this can run.
    """
    # Model-type identifier used by the transformers config registry.
    __lowerCamelCase : Union[str, Any] = "van"
    def __init__( self : int , a_ : str=2_24 , a_ : int=3 , a_ : Optional[Any]=[7, 3, 3, 3] , a_ : Tuple=[4, 2, 2, 2] , a_ : Union[str, Any]=[64, 1_28, 3_20, 5_12] , a_ : Optional[int]=[3, 3, 12, 3] , a_ : Optional[int]=[8, 8, 4, 4] , a_ : Any="gelu" , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=1E-6 , a_ : Dict=1E-2 , a_ : str=0.0 , a_ : Optional[int]=0.0 , **a_ : List[Any] , ) -> Any:
        """Record the VAN hyper-parameters (see class note on mangled names)."""
        super().__init__(**a_ )
        a__ : Optional[Any] = image_size
        a__ : Union[str, Any] = num_channels
        a__ : Optional[Any] = patch_sizes
        a__ : Any = strides
        a__ : Union[str, Any] = hidden_sizes
        a__ : Tuple = depths
        a__ : int = mlp_ratios
        a__ : int = hidden_act
        a__ : Optional[int] = initializer_range
        a__ : int = layer_norm_eps
        a__ : Union[str, Any] = layer_scale_init_value
        a__ : str = drop_path_rate
        a__ : List[str] = dropout_rate
| 642 |
# Month doomsday offsets for leap / non-leap years (Conway's Doomsday rule).
# (Fix: the original bound all three tables to one name, so the lists the
# function reads — DOOMSDAY_LEAP / DOOMSDAY_NOT_LEAP / WEEK_DAY_NAMES — were
# undefined.)
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def UpperCAmelCase(year, month, day) -> str:
    """Return the weekday name of a Gregorian date via Conway's Doomsday rule.

    Raises AssertionError when year is not 3+ digits or month/day is out of
    range.
    (Fixes: locals were all bound to ``__A`` while later lines read the real
    names, and the leap-century test used ``(year % 400) == 0`` — a century
    divisible by 400 IS a leap year, so the comparison must be ``!= 0``;
    verified against 2000-01-01 = Saturday.)
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class a__ :
    """Mixin checking feature-extractor JSON round-trips (to/from file and dir).

    NOTE(review): machine-mangled source — all four methods share the name
    ``__magic_name__`` (only the last survives on the class) and bodies read
    undefined names (``_a``, ``feat_extract``, ``obj``, ``feat_extract_first``,
    ``feat_extract_second``) whose bindings were collapsed into ``lowercase``.
    Subclasses are expected to set ``feature_extraction_class`` /
    ``feat_extract_dict`` (the attribute below is their mangled placeholder).
    """
    __lowerCAmelCase = None
    # to_json_string should reproduce every configured key/value.
    def __magic_name__ ( self ):
        lowercase : Any = self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : str = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , _a )
    # to_json_file / from_json_file round-trip preserves the dict.
    def __magic_name__ ( self ):
        lowercase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Optional[Any] = os.path.join(_a , "feat_extract.json" )
            feat_extract_first.to_json_file(_a )
            lowercase : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    # save_pretrained / from_pretrained round-trip preserves the dict.
    def __magic_name__ ( self ):
        lowercase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Dict = feat_extract_first.save_pretrained(_a )[0]
            check_json_file_has_correct_format(_a )
            lowercase : Optional[Any] = self.feature_extraction_class.from_pretrained(_a )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    # The extractor must be constructible with no arguments.
    def __magic_name__ ( self ):
        lowercase : Dict = self.feature_extraction_class()
        self.assertIsNotNone(_a )
| 361 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase(olid: str = "isbn/0140328726") -> dict:
    """Fetch an Open Library record (``<type>/<id>``, e.g. ``isbn/...``) as JSON.

    Raises ValueError when *olid* does not contain exactly one '/'.
    (Fix: the original's parameter was mangled to ``a_`` while the body read
    the undefined names ``olid``/``new_olid``.)
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def UpperCAmelCase(ol_book_data: dict) -> dict:
    """Condense a raw Open Library book record into a readable summary dict.

    Authors are resolved with extra API calls via ``get_openlibrary_data``;
    any list values are joined with ", ".
    (Fix: the original's parameter was mangled away and every intermediate
    result was bound to ``__A`` and discarded; ``isinstance(a_, a_)`` lost the
    loop variables.)
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive loop: look up ISBNs until the user quits.
    # (Fix: the original bound input/results to mangled constants and then
    # read the undefined names `isbn` / `book_summary`.)
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 55 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase : Optional[Any] = logging.get_logger(__name__)
class __snake_case :
    """RAG tokenizer: wraps a question-encoder tokenizer and a generator
    tokenizer and delegates to whichever is "current".

    NOTE(review): machine-mangled source — several defs repeat the single
    parameter name ``snake_case`` (a SyntaxError as written), results are
    bound to the local ``lowercase`` instead of ``self`` attributes
    (``question_encoder``, ``generator``, ``current_tokenizer``), and one
    f-string reads the undefined ``save_directory``. Distinct names must be
    restored before this can run.
    """
    def __init__( self ,snake_case ,snake_case ):
        """Store both tokenizers; the question encoder starts as current."""
        lowercase : List[str] = question_encoder
        lowercase : int = generator
        lowercase : Optional[Any] = self.question_encoder
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
        """Save both tokenizers into subfolders of the given directory."""
        if os.path.isfile(snake_case ):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
        os.makedirs(snake_case ,exist_ok=snake_case )
        lowercase : Optional[Any] = os.path.join(snake_case ,"""question_encoder_tokenizer""" )
        lowercase : List[Any] = os.path.join(snake_case ,"""generator_tokenizer""" )
        self.question_encoder.save_pretrained(snake_case )
        self.generator.save_pretrained(snake_case )
    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls ,snake_case ,**snake_case ):
        """Load both sub-tokenizers from a pretrained RAG checkpoint."""
        from ..auto.tokenization_auto import AutoTokenizer
        lowercase : List[Any] = kwargs.pop("""config""" ,snake_case )
        if config is None:
            lowercase : int = RagConfig.from_pretrained(snake_case )
        lowercase : Optional[Any] = AutoTokenizer.from_pretrained(
            snake_case ,config=config.question_encoder ,subfolder="""question_encoder_tokenizer""" )
        lowercase : List[Any] = AutoTokenizer.from_pretrained(
            snake_case ,config=config.generator ,subfolder="""generator_tokenizer""" )
        return cls(question_encoder=snake_case ,generator=snake_case )
    def __call__( self ,*snake_case ,**snake_case ):
        """Tokenize with whichever tokenizer is currently selected."""
        return self.current_tokenizer(*snake_case ,**snake_case )
    def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
        """Batch-decode with the generator tokenizer."""
        return self.generator.batch_decode(*snake_case ,**snake_case )
    def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
        """Decode with the generator tokenizer."""
        return self.generator.decode(*snake_case ,**snake_case )
    def _SCREAMING_SNAKE_CASE ( self ):
        """Switch the current tokenizer to the question encoder."""
        lowercase : List[str] = self.question_encoder
    def _SCREAMING_SNAKE_CASE ( self ):
        """Switch the current tokenizer to the generator."""
        lowercase : List[str] = self.generator
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = "longest" ,snake_case = None ,snake_case = True ,**snake_case ,):
        """Deprecated seq2seq batch helper; tokenizes sources and targets."""
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" ,snake_case ,)
        if max_length is None:
            lowercase : str = self.current_tokenizer.model_max_length
        lowercase : int = self(
            snake_case ,add_special_tokens=snake_case ,return_tensors=snake_case ,max_length=snake_case ,padding=snake_case ,truncation=snake_case ,**snake_case ,)
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            lowercase : Any = self.current_tokenizer.model_max_length
        lowercase : int = self(
            text_target=snake_case ,add_special_tokens=snake_case ,return_tensors=snake_case ,padding=snake_case ,max_length=snake_case ,truncation=snake_case ,**snake_case ,)
        lowercase : List[Any] = labels["""input_ids"""]
        return model_inputs
| 336 |
import requests
# Get a personal key at https://developers.giphy.com/
# (Fixes: the constant's name did not match the default-argument reference
# below; the module-level `List[str]` annotation referenced an unimported
# name; the function declared two parameters both named `a_` — a SyntaxError;
# and __main__ called the undefined `get_gifs`.)
giphy_api_key = 'YOUR API KEY'


def UpperCAmelCase(query: str, api_key: str = giphy_api_key) -> list:
    """Return the list of GIF URLs the Giphy search API finds for *query*."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print('\n'.join(UpperCAmelCase('space ship')))
| 55 | 0 |
'''simple docstring'''
__snake_case: Dict = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case: int = [{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case: List[str] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 577 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k±1 optimization.

    (Fix: the original's parameter was mangled to ``a_`` while the body read
    the undefined ``number``; the three functions in this snippet also all
    shared one name — the helpers now carry the names their callers already
    use, ``is_prime`` and ``prime_generator``.)
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order, starting from 2."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def UpperCAmelCase(nth: int = 10001) -> int:
    """Project Euler 7: return the *nth* prime (1-indexed)."""
    return next(itertools.islice(prime_generator(), nth - 1, None))


if __name__ == "__main__":
    print(f"{UpperCAmelCase() = }")
| 55 | 0 |
"""simple docstring"""
def a_(txt):
    """Return every variant of *txt* with exactly one alphabetic char uppercased.

    Non-alphabetic positions are skipped, so ``a_("1a") == ["1A"]``.
    (Fix: the original's parameter was mangled to ``__a`` while the body read
    the undefined ``txt``.)
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__('doctest').testmod()
| 571 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
    """Export a PyTorch BERT state dict as a TensorFlow v1 checkpoint.

    NOTE(review): machine-mangled — all three parameters share the name ``a_``
    (a SyntaxError as written; upstream they are ``model``, ``ckpt_dir``,
    ``model_name``) and the body reads names (``model``, ``state_dict``,
    ``name``, ``tensor``, ``session``, ``tf_var``, ``torch_tensor``,
    ``tf_name``, ``tf_weight``, ``saver``) whose bindings were collapsed into
    ``__A``. Distinct names must be restored before this can run.
    """
    # Tensors whose 2D layout must be transposed between torch and TF.
    __A = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # torch state-dict key -> TF variable-name rewrite rules.
    __A = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(a_ ):
        os.makedirs(a_ )
    __A = model.state_dict()
    # Apply the rewrite rules and prefix with "bert/".
    def to_tf_var_name(a_ ):
        for patt, repl in iter(a_ ):
            __A = name.replace(a_ , a_ )
        return F'''bert/{name}'''
    # Create a zero-initialized TF variable matching the torch tensor.
    def create_tf_var(a_ , a_ , a_ ):
        __A = tf.dtypes.as_dtype(tensor.dtype )
        __A = tf.get_variable(dtype=a_ , shape=tensor.shape , name=a_ , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(a_ )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            __A = to_tf_var_name(a_ )
            __A = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                __A = torch_tensor.T
            __A = create_tf_var(tensor=a_ , name=a_ , session=a_ )
            tf.keras.backend.set_value(a_ , a_ )
            __A = session.run(a_ )
            print(F'''Successfully created {tf_name}: {np.allclose(a_ , a_ )}''' )
        # Persist all trainable variables as <model_name>.ckpt in ckpt_dir.
        __A = tf.train.Saver(tf.trainable_variables() )
        saver.save(a_ , os.path.join(a_ , model_name.replace("-" , "_" ) + ".ckpt" ) )
def UpperCAmelCase ( a_=None ) -> List[Any]:
    """CLI entry point: parse args, load the PyTorch BERT, write a TF checkpoint.

    NOTE(review): machine-mangled — results are bound to ``__A`` while later
    lines read the undefined ``parser``/``args``/``model``, and the conversion
    function it calls exists in this file only under a mangled name.
    """
    __A = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=a_ , required=a_ , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=a_ , default=a_ , required=a_ , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=a_ , required=a_ , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=a_ , required=a_ , help="Directory in which to save tensorflow model" )
    __A = parser.parse_args(a_ )
    __A = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=a_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    main()
| 55 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __A ( tokenizer_name : str , checkpoint_name : str , dump_path : str , force_download : bool ):
    """Convert slow tokenizer checkpoints into fast-tokenizer files under *dump_path*.

    Args:
        tokenizer_name: fast tokenizer class name to convert, or ``None`` for all of them.
        checkpoint_name: single checkpoint to convert, or ``None`` for every known checkpoint.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints even when cached.

    Raises:
        ValueError: if *tokenizer_name* is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            # Convert every checkpoint this tokenizer class knows about.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    # Checkpoint files live in a sub-folder: append the prefix as a directory.
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    # Keep only the unified tokenizer.json; drop legacy vocab files.
                    os.remove(file_name )
                    logger.info(f'=> removing {file_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
    )
    parser.add_argument(
        '''--tokenizer_name''',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            '''download and convert all the checkpoints from AWS.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_name''',
        default=None,
        type=str,
        help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
    )
    parser.add_argument(
        '''--force_download''',
        action='''store_true''',
        help='''Re-download checkpoints.''',
    )
    args = parser.parse_args()
    # The conversion function is named ``__A`` in this file.
    __A(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 211 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public symbols; consumed lazily by ``_LazyModule`` below.
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply skip registering the modeling symbols.
    pass
else:
    # Extend (do not overwrite) the structure with the torch-backed symbols.
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 | 0 |
"""Lazy import structure for the DeBERTa model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule name -> public symbols; consumed lazily by ``_LazyModule`` below.
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 545 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bsa import BeautifulSoup

# Base search URL; the location (city) is appended to it.
SCREAMING_SNAKE_CASE :str = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='


def UpperCAmelCase ( a_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """Yield ``(job_title, company_name)`` pairs scraped from Indeed for city *a_*."""
    soup = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE + a_ ).content , "html.parser" )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
        job_title = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
        company_name = job.find("span" , {"class": "company"} ).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    # The generator is named ``UpperCAmelCase`` in this file, not ``fetch_jobs``.
    for i, job in enumerate(UpperCAmelCase('Bangalore'), 1):
        print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
"""Bead sort (gravity sort) for sequences of non-negative integers."""


def SCREAMING_SNAKE_CASE_ ( snake_case : list ) -> list:
    """Sort *snake_case* in place with bead sort and return it.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x , int ) or x < 0 for x in snake_case ):
        raise TypeError('Sequence must be list of non-negative integers' )
    # One settling pass per element is enough for all beads to fall.
    for _ in range(len(snake_case ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(snake_case , snake_case[1:] ) ):
            if rod_upper > rod_lower:
                # Let the surplus beads drop from the upper rod onto the lower one.
                snake_case[i] -= rod_upper - rod_lower
                snake_case[i + 1] += rod_upper - rod_lower
    return snake_case


if __name__ == "__main__":
    assert SCREAMING_SNAKE_CASE_([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert SCREAMING_SNAKE_CASE_([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 650 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    '''Unit tests for BlipaProcessor: save/load round-trips, image & text processing, decoding.

    NOTE(review): local names in this file were mangled — every local is bound to
    ``__A`` while later lines read the intended names (``processor``, ``tokenizer``,
    ``inputs``, ...), and bare ``A`` is undefined. Restore the original bindings
    before running these tests.
    '''
    def UpperCamelCase_ ( self : List[str] ):
        # setUp: persist a processor (image processor + tiny GPT-2 tokenizer) in a temp dir.
        __A = tempfile.mkdtemp()
        __A = BlipImageProcessor()
        __A = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        __A = BlipaProcessor(A ,A )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase_ ( self : Dict ,**A : int ):
        # Helper: reload the tokenizer of the saved processor, forwarding extra kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).tokenizer
    def UpperCamelCase_ ( self : Dict ,**A : Optional[int] ):
        # Helper: reload the image processor of the saved processor, forwarding extra kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).image_processor
    def UpperCamelCase_ ( self : Dict ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase_ ( self : Optional[int] ):
        # Helper: build one random 3x30x400 uint8 image as a PIL image list.
        __A = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
        __A = [Image.fromarray(np.moveaxis(A ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCamelCase_ ( self : Any ):
        # Saving then loading with extra kwargs must apply those kwargs to the components.
        __A = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __A = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
        __A = self.get_image_processor(do_normalize=A ,padding_value=1.0 )
        __A = BlipaProcessor.from_pretrained(
            self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=A ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,A )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,A )
    def UpperCamelCase_ ( self : List[Any] ):
        # Processor(images=...) must match the bare image processor's output.
        __A = self.get_image_processor()
        __A = self.get_tokenizer()
        __A = BlipaProcessor(tokenizer=A ,image_processor=A )
        __A = self.prepare_image_inputs()
        __A = image_processor(A ,return_tensors="np" )
        __A = processor(images=A ,return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
    def UpperCamelCase_ ( self : Tuple ):
        # Processor(text=...) must match the bare tokenizer's output.
        __A = self.get_image_processor()
        __A = self.get_tokenizer()
        __A = BlipaProcessor(tokenizer=A ,image_processor=A )
        __A = "lower newer"
        __A = processor(text=A )
        __A = tokenizer(A ,return_token_type_ids=A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def UpperCamelCase_ ( self : int ):
        # Joint text+image call returns the expected keys; no input at all raises.
        __A = self.get_image_processor()
        __A = self.get_tokenizer()
        __A = BlipaProcessor(tokenizer=A ,image_processor=A )
        __A = "lower newer"
        __A = self.prepare_image_inputs()
        __A = processor(text=A ,images=A )
        self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(A ):
            processor()
    def UpperCamelCase_ ( self : Optional[Any] ):
        # batch_decode must delegate to the tokenizer's batch_decode.
        __A = self.get_image_processor()
        __A = self.get_tokenizer()
        __A = BlipaProcessor(tokenizer=A ,image_processor=A )
        __A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __A = processor.batch_decode(A )
        __A = tokenizer.batch_decode(A )
        self.assertListEqual(A ,A )
    def UpperCamelCase_ ( self : Optional[Any] ):
        # Model input names exposed by the processor stay fixed.
        __A = self.get_image_processor()
        __A = self.get_tokenizer()
        __A = BlipaProcessor(tokenizer=A ,image_processor=A )
        __A = "lower newer"
        __A = self.prepare_image_inputs()
        __A = processor(text=A ,images=A )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
| 55 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Tests for TRANSFORMERS_OFFLINE=1: cached models must load without network access.

    Each test launches a fresh interpreter subprocess whose ``socket.socket`` is
    monkey-patched to fail, then asserts the snippet still succeeds from cache.

    NOTE(review): the base class ``__SCREAMING_SNAKE_CASE`` is undefined here
    (presumably ``TestCasePlus`` — it provides ``self.get_env()``), and locals are
    mangled: every local is bound to ``__SCREAMING_SNAKE_CASE`` while later lines
    read ``load``/``run``/``mock``/``cmd``/``env``/``result``. Restore before running.
    """
    @require_torch
    def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
        """Cached model + pipeline load succeeds offline when sockets raise RuntimeError."""
        __SCREAMING_SNAKE_CASE = """\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n        """
        __SCREAMING_SNAKE_CASE = """\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n        """
        __SCREAMING_SNAKE_CASE = """\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n        """
        # Force fetching the files so that we can use the cache
        __SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-random-bert"""
        BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
        BertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
        BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
        pipeline(task="""fill-mask""" , model=__SCREAMING_SNAKE_CASE )
        # baseline - just load from_pretrained with normal network
        __SCREAMING_SNAKE_CASE = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
        # should succeed
        __SCREAMING_SNAKE_CASE = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        __SCREAMING_SNAKE_CASE = """1"""
        __SCREAMING_SNAKE_CASE = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
    @require_torch
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
        """Cached load succeeds when sockets simulate flaky internet (socket.error)."""
        __SCREAMING_SNAKE_CASE = """\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n        """
        __SCREAMING_SNAKE_CASE = """\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n        """
        __SCREAMING_SNAKE_CASE = """\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n        """
        # Force fetching the files so that we can use the cache
        __SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-random-bert"""
        BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
        BertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
        BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
        pipeline(task="""fill-mask""" , model=__SCREAMING_SNAKE_CASE )
        # baseline - just load from_pretrained with normal network
        __SCREAMING_SNAKE_CASE = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
        # should succeed
        __SCREAMING_SNAKE_CASE = self.get_env()
        __SCREAMING_SNAKE_CASE = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
    @require_torch
    def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
        """Sharded checkpoints also load from cache with TRANSFORMERS_OFFLINE=1."""
        __SCREAMING_SNAKE_CASE = """\nfrom transformers import BertConfig, BertModel, BertTokenizer\n        """
        __SCREAMING_SNAKE_CASE = """\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n        """
        __SCREAMING_SNAKE_CASE = """\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n        """
        # baseline - just load from_pretrained with normal network
        __SCREAMING_SNAKE_CASE = [sys.executable, """-c""", """\n""".join([load, run] )]
        # should succeed
        __SCREAMING_SNAKE_CASE = self.get_env()
        __SCREAMING_SNAKE_CASE = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
        # next emulate no network
        __SCREAMING_SNAKE_CASE = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        __SCREAMING_SNAKE_CASE = """1"""
        __SCREAMING_SNAKE_CASE = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
    @require_torch
    def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
        """Without a cached config, pipeline() offline cannot infer the task and must error."""
        __SCREAMING_SNAKE_CASE = """\nfrom transformers import pipeline\n        """
        __SCREAMING_SNAKE_CASE = """\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n        """
        __SCREAMING_SNAKE_CASE = """\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n        """
        __SCREAMING_SNAKE_CASE = self.get_env()
        __SCREAMING_SNAKE_CASE = """1"""
        __SCREAMING_SNAKE_CASE = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
        __SCREAMING_SNAKE_CASE = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
    @require_torch
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
        """Models with trust_remote_code=True also work offline once cached."""
        __SCREAMING_SNAKE_CASE = """\nfrom transformers import AutoModel\n        """
        __SCREAMING_SNAKE_CASE = """\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n        """
        # baseline - just load from_pretrained with normal network
        __SCREAMING_SNAKE_CASE = [sys.executable, """-c""", """\n""".join([load, run] )]
        # should succeed
        __SCREAMING_SNAKE_CASE = self.get_env()
        __SCREAMING_SNAKE_CASE = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        __SCREAMING_SNAKE_CASE = """1"""
        __SCREAMING_SNAKE_CASE = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
| 627 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase ( IterableDataset ):
    '''Iterable dataset that packs tokenized texts into fixed-length token chunks.

    Buffers raw text until roughly enough characters are collected, tokenizes the
    buffer in one batch, joins everything with the BOS token as separator, and
    yields ``seq_length``-sized tensors (incomplete tails are dropped).
    '''

    def __init__( self , tokenizer , dataset , seq_length=10_24 , num_of_sequences=10_24 , chars_per_token=3.6 ):
        """Args:
            tokenizer: callable tokenizer exposing ``bos_token_id``.
            dataset: iterable of dicts with a ``"content"`` text field.
            seq_length: length of each emitted chunk.
            num_of_sequences: number of sequences to buffer per tokenization batch.
            chars_per_token: estimated characters per token, used to size the buffer.
        """
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Approximate number of raw characters to accumulate before tokenizing.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["content"] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    # Source exhausted: flush what we have, then stop.
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                # Separate concatenated documents with the BOS token.
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def UpperCAmelCase ( a_ ) -> "DataLoader":
    """Build the evaluation ``DataLoader`` from parsed CLI arguments *a_*."""
    # NOTE(review): names were mangled — every local is bound to ``__A`` while
    # later lines read ``args`` (should be the ``a_`` parameter), ``**a_`` (should
    # be the streaming-kwargs dict), ``ConstantLengthDataset`` (the class above is
    # named ``UpperCAmelCase``), module-level ``tokenizer``, and ``eval_dataset``/
    # ``eval_dataloader``. Restore the intended bindings before running.
    __A = {"streaming": True}
    __A = load_dataset(args.dataset_name , split="train" , **a_ )
    __A = ConstantLengthDataset(a_ , a_ , seq_length=args.seq_length )
    __A = DataLoader(a_ , batch_size=args.batch_size )
    return eval_dataloader
def UpperCAmelCase ( a_ ) -> "tuple[float, float]":
    """Evaluate the model on the eval dataloader and return ``(loss, perplexity)``.

    Args:
        a_: parsed CLI arguments; reads ``batch_size`` and ``max_eval_steps``.

    NOTE(review): relies on module-level ``model``, ``eval_dataloader`` and
    ``accelerator`` being prepared before this is called — verify against caller.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        # Repeat so gather() sees one loss per sample in the batch.
        loss = outputs.loss.repeat(a_.batch_size )
        losses.append(accelerator.gather(loss ) )
        if a_.max_eval_steps > 0 and step >= a_.max_eval_steps:
            break
    mean_loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(mean_loss )
    except OverflowError:
        perplexity = float("inf" )
    return mean_loss.item(), perplexity.item()
# NOTE(review): module-level names were mangled — every assignment target is
# ``SCREAMING_SNAKE_CASE`` while later lines read the intended names (``parser``,
# ``args``, ``model``, ``tokenizer``, ``eval_dataloader``, ``accelerator``,
# ``logger``, ``eval_loss``, ``perplexity``), and ``create_dataloader``/``evaluate``
# refer to the functions defined above as ``UpperCAmelCase``. Restore before running.
# Setup Accelerator
SCREAMING_SNAKE_CASE :"Accelerator" = Accelerator()
# Parse configuration
SCREAMING_SNAKE_CASE :"HfArgumentParser" = HfArgumentParser(EvaluationArguments)
SCREAMING_SNAKE_CASE :"EvaluationArguments" = parser.parse_args()
set_seed(args.seed)
# Logging
SCREAMING_SNAKE_CASE :"logging.Logger" = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
SCREAMING_SNAKE_CASE :"AutoModelForCausalLM" = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
SCREAMING_SNAKE_CASE :"AutoTokenizer" = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
SCREAMING_SNAKE_CASE :"DataLoader" = create_dataloader(args)
# Prepare everything with our `accelerator`.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 55 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """Processor combining a Blip image processor, a main tokenizer and a Q-Former tokenizer.

    NOTE(review): the base class ``__SCREAMING_SNAKE_CASE`` is undefined here
    (presumably ``ProcessorMixin``), and several locals are mangled: values are
    bound to ``lowercase__`` while later lines read ``qformer_text_encoding``.
    Restore the original bindings before running.
    """
    # Attribute names consumed by the ProcessorMixin machinery.
    _a : Optional[int] = ['''image_processor''', '''tokenizer''']
    _a : Union[str, Any] = '''BlipImageProcessor'''
    _a : List[str] = '''AutoTokenizer'''
    def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
        """Store image processor and main tokenizer via super(), plus the Q-Former tokenizer."""
        super().__init__(lowerCamelCase__ , lowerCamelCase__ )
        # add QFormer tokenizer
        lowercase__ : Optional[int] = qformer_tokenizer
    def __call__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = True , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> List[Any]:
        """Tokenize *text* with both tokenizers and preprocess *images*; return one BatchFeature."""
        if images is None and text is None:
            raise ValueError("""You have to specify at least images or text.""" )
        lowercase__ : int = BatchFeature()
        if text is not None:
            lowercase__ : str = self.tokenizer(
                text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
            encoding.update(lowerCamelCase__ )
            # Q-Former gets its own encoding, stored under qformer_-prefixed keys.
            lowercase__ : Any = self.qformer_tokenizer(
                text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
            lowercase__ : List[str] = qformer_text_encoding.pop("""input_ids""" )
            lowercase__ : Optional[Any] = qformer_text_encoding.pop("""attention_mask""" )
        if images is not None:
            lowercase__ : List[Any] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
            encoding.update(lowerCamelCase__ )
        return encoding
    def UpperCAmelCase__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
        """Delegate batch decoding to the main tokenizer."""
        return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
    def UpperCAmelCase__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
        """Delegate decoding to the main tokenizer."""
        return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def UpperCAmelCase__( self ) -> Dict:
        """Union of tokenizer and image-processor input names, order-preserving."""
        lowercase__ : Any = self.tokenizer.model_input_names
        lowercase__ : List[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def UpperCAmelCase__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
        """Save all components, with the Q-Former tokenizer in a dedicated subfolder."""
        if os.path.isfile(lowerCamelCase__ ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
        lowercase__ : int = os.path.join(lowerCamelCase__ , """qformer_tokenizer""" )
        self.qformer_tokenizer.save_pretrained(lowerCamelCase__ )
        return super().save_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
    @classmethod
    def UpperCAmelCase__( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
        """Reload components, fetching the Q-Former tokenizer from its subfolder."""
        lowercase__ : int = AutoTokenizer.from_pretrained(lowerCamelCase__ , subfolder="""qformer_tokenizer""" )
        lowercase__ : Dict = cls._get_arguments_from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
        args.append(lowerCamelCase__ )
        return cls(*lowerCamelCase__ )
| 200 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Tokenizer test-suite bindings for LayoutLM (slow + fast WordPiece tokenizers).

    NOTE(review): the mixin base ``__SCREAMING_SNAKE_CASE`` is undefined here
    (presumably ``TokenizerTesterMixin``), and locals are mangled: values are
    bound to ``__A`` while later lines read ``vocab_tokens``/``self.vocab_file``.
    Restore the original bindings before running.
    '''
    # Mixin configuration: classes under test and which variants to exercise.
    snake_case_ = LayoutLMTokenizer
    snake_case_ = LayoutLMTokenizerFast
    snake_case_ = True
    snake_case_ = True
    def UpperCamelCase_ ( self : Any ):
        # setUp: write a tiny WordPiece vocabulary file into the temp dir.
        super().setUp()
        __A = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        __A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def UpperCamelCase_ ( self : Tuple ,**A : int ):
        # Helper: build a slow tokenizer from the temp vocab, forwarding kwargs.
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**A )
    def UpperCamelCase_ ( self : Optional[Any] ,A : Any ):
        # Helper: sample (input, expected-normalized-output) pair for the mixin.
        __A = "UNwant\u00E9d,running"
        __A = "unwanted, running"
        return input_text, output_text
    def UpperCamelCase_ ( self : str ):
        # Full tokenization round-trip: tokens and their vocabulary ids.
        __A = self.tokenizer_class(self.vocab_file )
        __A = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(A ,["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[7, 4, 5, 10, 8, 9] )
    def UpperCamelCase_ ( self : int ):
        # Intentionally skipped test slot.
        pass
| 55 | 0 |
import os
import sys
import unittest
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase_ = os.path.join(git_repo_path, '''src''', '''transformers''')
lowerCAmelCase_ = '\n{0} = None\n'
lowerCAmelCase_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
lowerCAmelCase_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for the ``check_dummies`` utility (backend detection and dummy generation).

    NOTE(review): locals are mangled — values are bound to ``snake_case_`` while
    the assertions read ``__magic_name__``/``objects``/``dummy_files``, which are
    undefined as written. Restore the intended bindings before running.
    """
    def lowerCamelCase (self ) -> List[str]:
        '''find_backend() extracts the backend name(s) guarded by an import line.'''
        snake_case_ : int = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
        self.assertIsNone(__magic_name__ )
        snake_case_ : List[str] = find_backend(''' if not is_tokenizers_available():''' )
        self.assertEqual(__magic_name__ , '''tokenizers''' )
        snake_case_ : Optional[int] = find_backend(''' if not is_tensorflow_text_available():''' )
        self.assertEqual(__magic_name__ , '''tensorflow_text''' )
        snake_case_ : int = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
        self.assertEqual(__magic_name__ , '''sentencepiece_and_tokenizers''' )
        snake_case_ : Union[str, Any] = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
        self.assertEqual(__magic_name__ , '''sentencepiece_and_tensorflow_text''' )
        snake_case_ : str = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
        self.assertEqual(__magic_name__ , '''sentencepiece_and_tokenizers_and_vision''' )
    def lowerCamelCase (self ) -> int:
        '''read_init() exposes per-backend object lists with representative members.'''
        snake_case_ : List[str] = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , __magic_name__ )
        self.assertIn('''tensorflow_text''' , __magic_name__ )
        self.assertIn('''sentencepiece_and_tokenizers''' , __magic_name__ )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''' , objects['''torch'''] )
        self.assertIn('''TFBertModel''' , objects['''tf'''] )
        self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
        self.assertIn('''BertModel''' , objects['''torch'''] )
        self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
        self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
    def lowerCamelCase (self ) -> Dict:
        '''create_dummy_object() renders constants, functions and classes correctly.'''
        snake_case_ : Union[str, Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(__magic_name__ , '''\nCONSTANT = None\n''' )
        snake_case_ : Tuple = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            __magic_name__ , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )
        snake_case_ : Any = '''\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'''
        snake_case_ : Optional[int] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(__magic_name__ , __magic_name__ )
    def lowerCamelCase (self ) -> List[Any]:
        '''create_dummy_files() emits a complete autogenerated dummy module per backend.'''
        snake_case_ : Union[str, Any] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'''
        snake_case_ : List[str] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , __magic_name__ )
| 60 |
# Project Euler-style problem: find numbers that equal the sum of the fifth
# powers of their decimal digits.
# Fixed: the lookup table was bound to one name but read as DIGITS_FIFTH_POWER,
# and both functions were defined under a single shadowing name while the code
# called `digits_fifth_powers_sum` / `solution` (NameErrors).
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers in [1000, 1000000) that equal the sum of
    the fifth powers of their own digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 55 | 0 |
"""simple docstring"""
from manim import *
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    # NOTE(review): machine-mangled block. The base class name
    # `__SCREAMING_SNAKE_CASE` (presumably a manim `Scene`), the value
    # placeholder `a_` passed as arguments throughout, and several names read
    # below (`mem`, `cpu`, `gpu`, `model`, `key`, `key_text`, `step_a`,
    # `cpu_left_col_base`, `cpu_targs`, `first_animations`,
    # `second_animations`) are never bound: every assignment target was
    # rewritten to the throwaway name `a__`. The scene cannot run as written;
    # the original `a_` arguments were presumably manim direction constants
    # (RIGHT, DOWN, ...) — TODO confirm against the upstream source.
    def UpperCAmelCase ( self : str ) -> List[str]:
        """Animate CPU/GPU/Model memory blocks, then move highlighted model
        cells onto the CPU memory column (Accelerate-docs style diagram)."""
        # Prototype memory cells: an outer 0.5x0.5 square and a slightly
        # smaller borderless fill.
        a__ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
        a__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # Two rows of six copied cells grouped under a "CPU" label.
        a__ : str = [mem.copy() for i in range(6 )]
        a__ : Tuple = [mem.copy() for i in range(6 )]
        a__ : List[str] = VGroup(*a_ ).arrange(a_ , buff=0 )
        a__ : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
        a__ : Optional[Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
        a__ : int = Text("CPU" , font_size=24 )
        a__ : Any = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(a_ )
        # Single-cell "GPU" block, nudged left of its aligned position.
        a__ : Tuple = [mem.copy() for i in range(1 )]
        a__ : Any = VGroup(*a_ ).arrange(a_ , buff=0 )
        a__ : Optional[Any] = Text("GPU" , font_size=24 )
        a__ : Optional[int] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
        gpu.align_to(a_ , a_ )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(a_ )
        # Six-cell "Model" block placed on the right side.
        a__ : int = [mem.copy() for i in range(6 )]
        a__ : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 )
        a__ : Optional[Any] = Text("Model" , font_size=24 )
        a__ : Optional[int] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(a_ , run_time=1 ) , Create(a_ , run_time=1 ) , Create(a_ , run_time=1 ) , )
        # Caption text, legend square and legend markup.
        a__ : int = MarkupText(
            F"First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM." , font_size=24 , )
        a__ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        a__ : str = MarkupText(
            F"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(a_ , run_time=2.5 ) , Write(a_ ) , Write(a_ ) )
        self.add(a_ )
        # For each model cell: restyle its stroke, then move a smaller filled
        # copy onto the CPU column, stacking relative to the previous target.
        a__ : Any = []
        a__ : Union[str, Any] = []
        a__ : List[str] = []
        for i, rect in enumerate(a_ ):
            a__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 )
            cpu_target.move_to(a_ )
            cpu_target.generate_target()
            # Fractions of a cell used as stacking offsets below.
            a__ : Optional[int] = 0.46 / 4
            a__ : Optional[int] = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=a_ , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=a_ , buff=0.0 )
            cpu_targs.append(a_ )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(a_ ) )
            second_animations.append(MoveToTarget(a_ , run_time=1.5 ) )
        self.play(*a_ )
        self.play(*a_ )
        self.wait()
| 642 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( __SCREAMING_SNAKE_CASE ):
    """Config tester that additionally checks MobileNetV1-specific attributes.

    Renamed from the mangled ``UpperCAmelCase`` to the name this file actually
    instantiates (``MobileNetVaConfigTester``). NOTE(review): the base class
    name ``__SCREAMING_SNAKE_CASE`` is itself mangled (presumably
    ``ConfigTester``) — confirm upstream.
    """

    def UpperCamelCase_ ( self : Any ):
        # Fixed: the config was bound to a mangled local while the assertions
        # read the undefined name `A` (NameError).
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,"tf_padding" ) )
        self.parent.assertTrue(hasattr(config ,"depth_multiplier" ) )
class MobileNetVaModelTester:
    """Builds MobileNetV1 configs and dummy inputs, and runs the shared model checks.

    NOTE(review): reconstructed from a machine-mangled source in which every
    parameter was named ``A`` (a SyntaxError), locals never reached ``self``,
    and all methods shared one name. Parameter and method names were recovered
    from the surviving call sites in this file (``prepare_config_and_inputs``,
    ``get_config``, ``create_and_check_model``,
    ``create_and_check_for_image_classification``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The effective hidden size scales with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for the tests."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Run the base model and check the feature-map shape."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Run the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-test harness for MobileNetV1 models.

    NOTE(review): this block is machine-mangled and cannot run as written:
    - both mixin base names are ``__SCREAMING_SNAKE_CASE`` (undefined here);
    - the class attributes are all assigned to the single name
      ``snake_case_`` so only the final ``False`` survives and
      ``all_model_classes`` etc. are never defined;
    - every method is named ``UpperCamelCase_`` (later defs shadow earlier
      ones) and none carries the ``test_`` prefix unittest discovers;
    - locals bound to ``__A`` (name-mangled) never reach the
      ``self.model_tester`` / ``self.config_tester`` attributes read below;
    - the inner function signature repeats the parameter name ``A``, which is
      a SyntaxError.
    Restore the upstream names before trusting any behavior here.
    """
    snake_case_ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    snake_case_ = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    def UpperCamelCase_ ( self : Any ):
        # setUp-style method: builds the testers (results discarded — see note).
        __A = MobileNetVaModelTester(self )
        __A = MobileNetVaConfigTester(self ,config_class=A ,has_text_modality=A )
    def UpperCamelCase_ ( self : str ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass
    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def UpperCamelCase_ ( self : Tuple ):
        pass
    @unittest.skip(reason="MobileNetV1 does not output attentions" )
    def UpperCamelCase_ ( self : Any ):
        pass
    def UpperCamelCase_ ( self : Optional[int] ):
        # Forward-signature check: first positional argument of model.forward
        # is expected to be "pixel_values".
        __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            __A = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __A = [*signature.parameters.keys()]
            __A = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,A )
    def UpperCamelCase_ ( self : List[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : Optional[int] ):
        # Hidden-states check; the literal 26 below is the expected number of
        # hidden states (cannot be confirmed from this mangled block alone).
        def check_hidden_states_output(A : List[Any] ,A : List[Any] ,A : Optional[int] ):
            __A = model_class(A )
            model.to(A )
            model.eval()
            with torch.no_grad():
                __A = model(**self._prepare_for_class(A ,A ) )
            __A = outputs.hidden_states
            __A = 26
            self.assertEqual(len(A ) ,A )
        __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = True
            check_hidden_states_output(A ,A ,A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __A = True
            check_hidden_states_output(A ,A ,A )
    def UpperCamelCase_ ( self : Tuple ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )
    @slow
    def UpperCamelCase_ ( self : Union[str, Any] ):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = MobileNetVaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def prepare_img() -> "Image.Image":
    """Load the COCO test-fixture image used by the integration tests.

    Renamed from the mangled ``UpperCAmelCase`` to ``prepare_img``, the name
    this file actually calls; the ``-> str`` annotation was wrong (a PIL
    image is returned).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test running the pretrained MobileNetV1 checkpoint on a fixture image.

    NOTE(review): reconstructed from a machine-mangled source in which both
    methods shared one name (shadowing the cached property) and values were
    bound to throwaway locals; names recovered from the surviving references
    (``self.default_image_processor``, ``prepare_img``, ``torch_device``).
    """

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def UpperCamelCase_ ( self : Optional[Any] ):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 55 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """Convert a GPTSAN TensorFlow checkpoint (``args.tf_model_dir``) into a
    PyTorch state dict saved at ``args.output``.

    NOTE(review): reconstructed from a machine-mangled source in which the
    targets of most assignments (the state-dict writes in particular) were
    lost and the function/parameter names mismatched their uses. Key names
    and tensor transforms were recovered from the surviving references —
    confirm against the upstream converter. The mangled source read
    ``np.floataa`` for the dtype; ``np.float16`` is assumed below — confirm.
    """
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."""
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue  # skip optimizer slots; only model weights are converted
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    # The token embedding is tied to the LM head.
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
_A : Optional[int] = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
_A : Optional[Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 361 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds LayoutLMv3 configs and dummy text+layout+image inputs, and runs the shared model checks.

    NOTE(review): reconstructed from a machine-mangled source (every parameter
    was named ``A`` — a SyntaxError — locals never reached ``self``, and all
    methods shared one name). The class name and the method names were
    recovered from this file's own call sites (``LayoutLMvaModelTester``,
    ``prepare_config_and_inputs``, ``create_and_check_model``,
    ``create_and_check_for_sequence_classification``,
    ``create_and_check_for_token_classification``,
    ``create_and_check_for_question_answering``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels)."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1 for every box.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-test harness for LayoutLMv3 models.

    NOTE(review): machine-mangled and cannot run as written: the mixin bases
    are the undefined name ``__SCREAMING_SNAKE_CASE``; the ``snake_case_``
    class attributes share one name (only the last assignment survives, so
    ``all_model_classes``/``pipeline_model_mapping`` are never defined); every
    method is named ``UpperCamelCase_`` (later defs shadow earlier ones and
    none is unittest-discoverable); ``__A`` locals never reach
    ``self.model_tester``/``self.config_tester``; and several signatures
    repeat the parameter name ``A``, which is a SyntaxError. Restore the
    upstream names before trusting any behavior here.
    """
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Tuple ,A : List[Any] ,A : Optional[Any] ):
        # Pipeline-skip hook: unconditionally skips pipeline tests for this model.
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def UpperCamelCase_ ( self : Union[str, Any] ):
        # setUp-style method: builds the testers (results discarded — see note).
        __A = LayoutLMvaModelTester(self )
        __A = ConfigTester(self ,config_class=A ,hidden_size=37 )
    def UpperCamelCase_ ( self : List[Any] ,A : int ,A : List[str] ,A : Dict=False ):
        # _prepare_for_class-style method: builds per-model-class inputs and,
        # when requested, the matching label tensors (gated by get_values(...)).
        __A = copy.deepcopy(A )
        if model_class in get_values(A ):
            __A = {
                k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
                if isinstance(A ,torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(A ):
                __A = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in get_values(A ):
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                __A = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A ,)
        return inputs_dict
    def UpperCamelCase_ ( self : List[Any] ):
        self.config_tester.run_common_tests()
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : str ):
        # Re-runs the model check for each position-embedding type.
        __A = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __A = type
            self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A )
    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A )
    def UpperCamelCase_ ( self : str ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A )
    @slow
    def UpperCamelCase_ ( self : Optional[int] ):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = LayoutLMvaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def prepare_img() -> "Image.Image":
    """Load the COCO test-fixture image used by the integration tests.

    Renamed from the mangled ``UpperCAmelCase`` to ``prepare_img``, the name
    this file actually calls.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test running the base LayoutLMv3 checkpoint on a fixture image.

    NOTE(review): reconstructed from a machine-mangled source in which both
    methods shared one name (shadowing the cached property) and values were
    bound to throwaway locals; names recovered from the surviving references
    (``self.default_image_processor``, ``prepare_img``, ``torch_device``).
    """

    @cached_property
    def default_image_processor(self):
        # NOTE(review): the mangled source read `apply_ocr=A` (undefined);
        # False matches the manually supplied input_ids/bbox below — confirm.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self : Dict ):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 55 | 0 |
# Module state for the digit-sum sequence solver below.
# Fixed: all three values were bound to the single name `lowercase`, yet the
# very next line (and the functions below) read `ks`, `base` and `memo`, so
# the module crashed with a NameError on import. Names recovered from those
# references.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]  # base[k] == 10**k
memo: dict[int, dict[int, list[list[int]]]] = {}
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
    # NOTE(review): machine-mangled. The four parameters share one name
    # (duplicate ``SCREAMING_SNAKE_CASE__`` — a SyntaxError) and the targets
    # of the ``lowercase`` assignments / the ``a_`` argument placeholders were
    # lost. Judging from the surviving references the signature was
    # ``(a_i, k, i, n)`` and this function performs one memoized "jump" of
    # the digit-sum sequence, returning ``(diff, dn)``. Callers reference
    # ``next_term``, which this mangled name never defines. Restore from the
    # upstream source before use.
    lowercase : Any = sum(a_i[j] for j in range(a_ , len(a_ ) ) )
    lowercase : Any = sum(a_i[j] * base[j] for j in range(min(len(a_ ) , a_ ) ) )
    lowercase , lowercase : List[str] = 0, 0
    lowercase : Dict = n - i
    lowercase : List[str] = memo.get(a_ )
    if sub_memo is not None:
        lowercase : Tuple = sub_memo.get(a_ )
        if jumps is not None and len(a_ ) > 0:
            # find and make the largest jump without going over
            lowercase : str = -1
            for _k in range(len(a_ ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    lowercase : Any = _k
                    break
            if max_jump >= 0:
                lowercase , lowercase , lowercase : int = jumps[max_jump]
                # since the difference between jumps is cached, add c
                lowercase : List[str] = diff + c
                for j in range(min(a_ , len(a_ ) ) ):
                    lowercase , lowercase : List[str] = divmod(a_ , 10 )
                if new_c > 0:
                    add(a_ , a_ , a_ )
        else:
            lowercase : str = []
    else:
        lowercase : List[str] = {c: []}
        lowercase : List[Any] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            lowercase , lowercase : Dict = next_term(a_ , k - 1 , i + dn , a_ )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        lowercase , lowercase : Tuple = compute(a_ , a_ , i + dn , a_ )
        diff += _diff
        dn += terms_jumped
    lowercase : Optional[int] = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    lowercase : Any = 0
    while j < len(a_ ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(a_ , (diff, dn, k) )
    return (diff, dn)
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
    # NOTE(review): machine-mangled. The four parameters share one name
    # (duplicate ``SCREAMING_SNAKE_CASE__`` — a SyntaxError) and the
    # ``lowercase``/``a_`` placeholders lost the real assignment targets.
    # Judging from the surviving references the signature was ``(a_i, k, i, n)``:
    # compute sequence terms one by one from index ``i`` until ``n`` (or until
    # a carry spills past digit position ``k``), returning
    # ``(diff, terms_jumped)``. Callers reference ``compute``, which this
    # mangled name never defines. Restore from the upstream source before use.
    if i >= n:
        return 0, i
    if k > len(a_ ):
        a_i.extend([0 for _ in range(k - len(a_ ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    lowercase : Any = i
    lowercase , lowercase , lowercase : Tuple = 0, 0, 0
    for j in range(len(a_ ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        lowercase : Optional[Any] = ds_c + ds_b
        diff += addend
        lowercase : Dict = 0
        for j in range(a_ ):
            lowercase : List[Any] = a_i[j] + addend
            lowercase , lowercase : int = divmod(a_ , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_ , a_ , a_ )
    return diff, i - start_i
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
for j in range(a_ , len(a_ ) ):
lowercase : Dict = digits[j] + addend
if s >= 10:
lowercase , lowercase : Optional[Any] = divmod(a_ , 10 )
lowercase : List[Any] = addend // 10 + quotient
else:
lowercase : Union[str, Any] = s
lowercase : Union[str, Any] = addend // 10
if addend == 0:
break
while addend > 0:
lowercase , lowercase : Union[str, Any] = divmod(a_ , 10 )
digits.append(a_ )
def _snake_case(n=10**15) -> int:
    """Return a(n) of the digit-sum sequence 1, 2, 4, 8, 16, 23, ...
    (Project Euler 551), where a(i+1) = a(i) + digitsum(a(i)).

    Args:
        n: index of the term to compute (default 10**15).
    """
    # Original was broken by obfuscation (undefined local names); restored.
    digits = [1]  # little-endian digits of the current term
    i = 1
    dn = 0  # number of terms jumped past term i
    while True:
        # NOTE(review): the jump helper is defined earlier in this file under
        # an obfuscated name; ``next_term`` assumes its conventional name.
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    # Convert the little-endian digit list back to an integer.
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    # Script entry point: print the Project Euler 551 answer.
    # NOTE(review): ``solution`` is the conventional name for the function
    # defined above under an obfuscated identifier — confirm the binding.
    print(F'''{solution() = }''')
| 336 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase(unittest.TestCase):
    """Config holder that builds the constructor kwargs for a
    BeitImageProcessor under test.

    The original was broken by obfuscation: all fourteen ``__init__``
    parameters shared the name ``A`` (a SyntaxError) and every attribute
    assignment was collapsed into a discarded local, so the kwargs method
    read attributes that were never set.  Restored to working form.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 — mutable defaults kept for upstream parity
        image_std=[0.5, 0.5, 0.5],  # noqa: B006
        do_reduce_labels=False,
    ):
        # Fall back to the fixture defaults when sizes are not supplied.
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def UpperCamelCase_(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def UpperCAmelCase():
    """Load one (image, segmentation-map) pair from the ADE20k test fixtures.

    Original was broken by obfuscation: both opened images were bound to a
    discarded local while the return statement referenced undefined names.
    Restored; the bogus ``-> int`` annotation was dropped (returns a tuple of
    two PIL images).
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    seg_map = Image.open(dataset[1]["file"])
    return image, seg_map
def UpperCAmelCase():
    """Load two (image, segmentation-map) pairs from the ADE20k test fixtures.

    Original was broken by obfuscation (all opened images bound to a discarded
    local, return referencing undefined names); restored to working form.
    Returns ([image1, image2], [map1, map2]) as PIL images.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image_a = Image.open(ds[0]["file"])
    map_a = Image.open(ds[1]["file"])
    image_b = Image.open(ds[2]["file"])
    map_b = Image.open(ds[3]["file"])
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class UpperCAmelCase(__SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Tests for BeitImageProcessor: config attribute presence, dict
    round-trips, and encoding of PIL / numpy / torch inputs, including
    semantic-segmentation label handling.

    NOTE(review): this file is identifier-obfuscated — the mixin base
    ``__SCREAMING_SNAKE_CASE`` (upstream: ImageProcessingSavingTestMixin), the
    helper ``BeitImageProcessingTester`` and ``prepare_semantic_*_inputs``
    appear under different names elsewhere in the file, locals are collapsed
    into ``__A``, and assertion arguments reference a bare ``A``.  The code is
    kept byte-identical; confirm name bindings before running.
    """

    # Processor class under test; None when vision deps are unavailable.
    snake_case_ = BeitImageProcessor if is_vision_available() else None

    def UpperCamelCase_(self: List[Any]):
        # setUp: builds the shared config holder.
        # NOTE(review): result is bound to a throwaway local; later methods
        # read ``self.image_processor_tester`` — verify after de-obfuscation.
        __A = BeitImageProcessingTester(self)

    @property
    def UpperCamelCase_(self: List[Any]):
        # Kwargs used to construct the processor in each test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_(self: int):
        # The processor exposes every expected config attribute.
        __A = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(A, "do_resize"))
        self.assertTrue(hasattr(A, "size"))
        self.assertTrue(hasattr(A, "do_center_crop"))
        self.assertTrue(hasattr(A, "center_crop"))
        self.assertTrue(hasattr(A, "do_normalize"))
        self.assertTrue(hasattr(A, "image_mean"))
        self.assertTrue(hasattr(A, "image_std"))

    def UpperCamelCase_(self: List[str]):
        # from_dict honors defaults and explicit size/crop_size overrides.
        __A = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, A)
        __A = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=A)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, A)

    def UpperCamelCase_(self: List[Any]):
        # Intentionally empty (placeholder inherited from the common test suite).
        pass

    def UpperCamelCase_(self: Optional[int]):
        # PIL inputs: single image and batch are resized/cropped to crop_size.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=A)
        for image in image_inputs:
            self.assertIsInstance(A, Image.Image)
        # Test not batched input
        __A = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        __A = image_processing(A, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def UpperCamelCase_(self: List[str]):
        # numpy inputs: same shape expectations as the PIL case.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A)
        for image in image_inputs:
            self.assertIsInstance(A, np.ndarray)
        # Test not batched input
        __A = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        __A = image_processing(A, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def UpperCamelCase_(self: int):
        # torch tensor inputs: same shape expectations as the PIL case.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A)
        for image in image_inputs:
            self.assertIsInstance(A, torch.Tensor)
        # Test not batched input
        __A = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        __A = image_processing(A, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def UpperCamelCase_(self: str):
        # Segmentation maps: labels come back as long tensors in [0, 255]
        # with the cropped spatial shape, for tensor and PIL inputs alike.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A)
        __A = []
        for image in image_inputs:
            self.assertIsInstance(A, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        __A = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 2_55)
        # Test batched
        __A = image_processing(A, A, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 2_55)
        # Test not batched input (PIL images)
        __A, __A = prepare_semantic_single_inputs()
        __A = image_processing(A, A, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 2_55)
        # Test batched input (PIL images)
        __A, __A = prepare_semantic_batch_inputs()
        __A = image_processing(A, A, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 2_55)

    def UpperCamelCase_(self: Dict):
        # reduce_labels behavior: without it labels stay in the ADE20k class
        # range; with it the background is remapped into 255.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        __A, __A = prepare_semantic_single_inputs()
        __A = image_processing(A, A, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 1_50)
        __A = True
        __A = image_processing(A, A, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 2_55)
| 55 | 0 |
'''simple docstring'''
def _snake_case ( A_ : Union[str, Any] = 1000 ):
"""simple docstring"""
a_ : str = 3
a_ : Any = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
    # Script entry point: print the Project Euler 1 answer.
    # NOTE(review): ``solution`` is the conventional name for the function
    # defined above under an obfuscated identifier — confirm the binding.
    print(F"""{solution() = }""")
| 577 |
from numpy import exp, pi, sqrt
def UpperCAmelCase(x, mu=0.0, sigma=1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma**2) at x.

    Fixes over the original: all three parameters were named ``a_`` (a
    duplicate-argument SyntaxError) while the body referenced ``x``/``mu``/
    ``sigma``, and the return annotation wrongly said ``int`` for a float
    result.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 55 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : str = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 571 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase(unittest.TestCase):
    """Slow integration test: Flax Stable Diffusion 2 inpainting sharded
    across all available JAX devices, checked against a reference slice.

    NOTE(review): this file is identifier-obfuscated — locals are collapsed
    into ``__A`` and several call arguments reference a bare ``A``; the code
    is kept byte-identical, confirm name bindings before running.
    """

    def UpperCamelCase_(self: Tuple):
        # clean up the VRAM after each test
        # NOTE(review): named like a tearDown and calls super().tearDown();
        # confirm it is registered as tearDown once de-obfuscated.
        super().tearDown()
        gc.collect()

    def UpperCamelCase_(self: Optional[int]):
        # Fetch the reference init image and inpainting mask fixtures.
        __A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        __A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        __A = "xvjiarui/stable-diffusion-2-inpainting"
        __A, __A = FlaxStableDiffusionInpaintPipeline.from_pretrained(A, safety_checker=A)
        __A = "Face of a yellow cat, high resolution, sitting on a park bench"
        __A = jax.random.PRNGKey(0)
        __A = 50
        # Replicate prompt/image/mask once per device.
        __A = jax.device_count()
        __A = num_samples * [prompt]
        __A = num_samples * [init_image]
        __A = num_samples * [mask_image]
        __A, __A, __A = pipeline.prepare_inputs(A, A, A)
        # shard inputs and rng
        __A = replicate(A)
        __A = jax.random.split(A, jax.device_count())
        __A = shard(A)
        __A = shard(A)
        __A = shard(A)
        __A = pipeline(
            A, A, A, A, A, A, jit=A)
        # Compare a fixed 3x3 slice of the first image to the golden values.
        __A = output.images.reshape(A, 5_12, 5_12, 3)
        __A = images[0, 2_53:2_56, 2_53:2_56, -1]
        __A = jnp.asarray(jax.device_get(image_slice.flatten()))
        __A = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 55 | 0 |
'''simple docstring'''
from math import sqrt
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' must been an int and positive"
__SCREAMING_SNAKE_CASE : Tuple = True
# 0 and 1 are none primes.
if number <= 1:
__SCREAMING_SNAKE_CASE : Any = False
for divisor in range(2 , int(round(sqrt(a_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__SCREAMING_SNAKE_CASE : List[str] = False
break
# precondition
assert isinstance(a_ , a_ ), "'status' must been from type bool"
return status
def __A ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(range(2 , n + 1 ) )
__SCREAMING_SNAKE_CASE : Tuple = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(a_ ) ):
for j in range(i + 1 , len(a_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__SCREAMING_SNAKE_CASE : Dict = 0
# filters actual prime numbers.
__SCREAMING_SNAKE_CASE : Dict = [x for x in begin_list if x != 0]
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def __A(n):
    """Return all primes in [2, n] by testing each number individually.

    Original was broken by obfuscation (undefined ``n`` / ``a_``); restored.
    NOTE(review): ``is_prime`` is the primality helper defined earlier in this
    file under an obfuscated name — confirm the binding.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def __A(number):
    """Return the prime factorization of ``number`` as a list, e.g. 12 -> [2, 2, 3].

    Original was broken by obfuscation (undefined ``number`` / ``a_``);
    restored.  NOTE(review): ``is_prime`` is defined earlier in this file
    under an obfuscated name — confirm the binding.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # NOTE(review): true division keeps quotient a float, as in
                # the upstream implementation — the comparison still works.
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def __A(number):
    """Return the largest prime factor of ``number``.

    Original was broken by obfuscation (undefined ``number``); restored.
    NOTE(review): ``prime_factorization`` is defined earlier in this file
    under an obfuscated name — confirm the binding.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def __A(number):
    """Return the smallest prime factor of ``number``.

    Original was broken by obfuscation (undefined ``number``); restored.
    NOTE(review): ``prime_factorization`` is defined earlier in this file
    under an obfuscated name — confirm the binding.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , a_ ), "compare bust been from type bool"
return number % 2 == 0
def __A ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , a_ ), "compare bust been from type bool"
return number % 2 != 0
def __A(number):
    """Goldbach: return two primes whose sum is the even ``number`` (> 2).

    Original was broken by obfuscation (undefined ``number``); restored.
    NOTE(review): ``is_prime``, ``is_even`` and ``get_prime_numbers`` are
    defined earlier in this file under obfuscated names — confirm bindings.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def __A ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
while numbera != 0:
__SCREAMING_SNAKE_CASE : Any = numbera % numbera
__SCREAMING_SNAKE_CASE : List[Any] = numbera
__SCREAMING_SNAKE_CASE : Any = rest
# precondition
assert isinstance(a_ , a_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __A(number_a, number_b):
    """Least common multiple of two positive ints via prime factorizations.

    Original was broken by obfuscation: duplicate parameter names and the two
    factor lists collapsed into one identifier; restored.
    NOTE(review): ``prime_factorization`` is defined earlier in this file
    under an obfuscated name — confirm the binding.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a)
        prime_fac_b = prime_factorization(number_b)
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_b)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1: take each shared prime at its max multiplicity
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2: pick up primes only present in number_b
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def __A(n):
    """Return the n-th prime, counting from 0 (n=0 -> 2, n=1 -> 3, ...).

    Original was broken by obfuscation (undefined ``n``); restored.
    NOTE(review): ``is_prime`` is defined earlier in this file under an
    obfuscated name — confirm the binding.
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def __A(p_number_a, p_number_b):
    """Return all primes strictly between the primes ``p_number_a`` and
    ``p_number_b`` (endpoints excluded).

    Original was broken by obfuscation (undefined names); restored.
    NOTE(review): ``is_prime`` is defined earlier in this file under an
    obfuscated name — confirm the binding.
    """
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 1), "'n' must been int and >= 1"
__SCREAMING_SNAKE_CASE : int = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(a_ )
# precondition
assert ans[0] == 1 and ans[len(a_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __A(number):
    """Return True iff ``number`` is perfect (equal to the sum of its proper
    divisors), e.g. 6 = 1 + 2 + 3.

    Original was broken by obfuscation (undefined ``number``); restored.
    NOTE(review): ``get_divisors`` is defined earlier in this file under an
    obfuscated name — confirm the binding.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def __A(numerator, denominator):
    """Reduce the fraction numerator/denominator to lowest terms, returning
    a (numerator, denominator) tuple.

    Original was broken by obfuscation (undefined names); restored.
    NOTE(review): ``gcd`` is defined earlier in this file under an obfuscated
    name — confirm the binding.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __A ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'n' must been a int and >= 0"
__SCREAMING_SNAKE_CASE : int = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __A ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'n' must been an int and >= 0"
__SCREAMING_SNAKE_CASE : Dict = 0
__SCREAMING_SNAKE_CASE : Tuple = 1
__SCREAMING_SNAKE_CASE : str = 1 # this will be return
for _ in range(n - 1 ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ans
ans += fiba
__SCREAMING_SNAKE_CASE : List[Any] = tmp
return ans
| 211 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCAmelCase(unittest.TestCase):
    """Config holder that builds the constructor kwargs for a
    GLPNImageProcessor under test.

    The original was broken by obfuscation: all ``__init__`` parameters shared
    the name ``A`` (a SyntaxError) and every attribute assignment was
    collapsed into a discarded local.  Restored to working form.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def UpperCamelCase_(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class UpperCAmelCase(__SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Tests for GLPNImageProcessor: config attributes plus PIL / numpy /
    torch input handling (output dims must be multiples of size_divisor).

    NOTE(review): this file is identifier-obfuscated — the mixin base
    ``__SCREAMING_SNAKE_CASE`` (upstream: ImageProcessingSavingTestMixin) and
    ``GLPNImageProcessingTester`` appear under different names, locals are
    collapsed into ``__A`` and assertion arguments reference a bare ``A``.
    Code kept byte-identical; confirm bindings before running.
    """

    # Processor class under test; None when vision deps are unavailable.
    snake_case_ = GLPNImageProcessor if is_vision_available() else None

    def UpperCamelCase_(self: int):
        # setUp: builds the shared config holder.
        # NOTE(review): result bound to a throwaway local; later methods read
        # ``self.image_processor_tester`` — verify after de-obfuscation.
        __A = GLPNImageProcessingTester(self)

    @property
    def UpperCamelCase_(self: Optional[Any]):
        # Kwargs used to construct the processor in each test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_(self: Any):
        # The processor exposes every expected config attribute.
        __A = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(A, "do_resize"))
        self.assertTrue(hasattr(A, "size_divisor"))
        self.assertTrue(hasattr(A, "resample"))
        self.assertTrue(hasattr(A, "do_rescale"))

    def UpperCamelCase_(self: str):
        # Intentionally empty (placeholder inherited from the common test suite).
        pass

    def UpperCamelCase_(self: Dict):
        # PIL inputs: output height/width must be divisible by size_divisor.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=A)
        for image in image_inputs:
            self.assertIsInstance(A, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def UpperCamelCase_(self: Optional[Any]):
        # numpy inputs: same divisibility expectations.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A)
        for image in image_inputs:
            self.assertIsInstance(A, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def UpperCamelCase_(self: int):
        # torch tensor inputs: same divisibility expectations.
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A)
        for image in image_inputs:
            self.assertIsInstance(A, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __A = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 55 | 0 |
"""simple docstring"""
def A(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True iff ``input_str`` is a pangram (contains every letter a-z),
    collecting seen letters in a set.

    Original was broken by obfuscation: the parameter was renamed while the
    body referenced ``input_str``/``frequency``/``a_`` (all undefined);
    restored to working form.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 2_6
def A(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True iff ``input_str`` is a pangram, using a 26-slot boolean
    table indexed by letter position.

    Original was broken by obfuscation: the per-letter index assignment was
    collapsed into a scalar rebind (so any string with one letter of each
    case would pass) and the parameter name was lost; restored.
    """
    flag = [False] * 2_6
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord('a')] = True
        elif char.isupper():
            flag[ord(char) - ord('A')] = True
    return all(flag)
def A(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True iff ``input_str`` is a pangram, via a set comprehension
    over its lowercased alphabetic characters.

    Original was broken by obfuscation (parameter renamed away from the
    ``input_str`` the body references); restored.
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 2_6
def A() -> None:
    """Benchmark the three pangram implementations with ``timeit``.

    Original was broken by obfuscation: the setup string was bound to a
    discarded local while ``timeit`` was called with an undefined ``a_``;
    restored.
    """
    from timeit import timeit

    # NOTE(review): the statements below assume the conventional function
    # names; in this file the implementations are defined under obfuscated
    # names, and '__main__' resolution requires running as a script — confirm.
    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    # Run doctests, then the timing comparison.
    # NOTE(review): ``benchmark`` is the conventional name for the benchmark
    # function defined above under an obfuscated identifier — confirm.
    import doctest

    doctest.testmod()
    benchmark()
| 545 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class UpperCAmelCase(__SCREAMING_SNAKE_CASE):
    """Task template describing an image-classification dataset: one image
    column and one ClassLabel column, with helpers to align the label schema
    to a concrete dataset's features.

    NOTE(review): the base class and the ``frozen=`` argument both use the
    placeholder ``__SCREAMING_SNAKE_CASE`` (upstream: TaskTemplate /
    frozen=True), the validator below reads undefined ``features`` /
    ``label_schema`` / ``task_template`` names, and results are bound to a
    discarded ``__A`` — confirm bindings after de-obfuscation.
    """

    # Task identifier; kept in asdict output even when left at the default.
    snake_case_ = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    # Expected input schema: a single image column.
    snake_case_ = Features({"image": Image()})
    # Expected label schema: a single ClassLabel column.
    snake_case_ = Features({"labels": ClassLabel})
    # Names of the dataset columns carrying the image and the label.
    snake_case_ = "image"
    snake_case_ = "labels"

    def UpperCamelCase_(self: Optional[Any], A: Tuple):
        # Validate that the label column exists and is a ClassLabel, then
        # return a copy of the template whose label schema matches the
        # dataset's actual feature.
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''')
        if not isinstance(features[self.label_column], A):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''')
        __A = copy.deepcopy(self)
        __A = self.label_schema.copy()
        __A = features[self.label_column]
        __A = label_schema
        return task_template

    @property
    def UpperCamelCase_(self: Any):
        # Mapping from dataset column names to the template's standard names.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 55 | 0 |
"""simple docstring"""
# Digit parity tables for the reversible-numbers solver below.
# NOTE(review): both lists are bound to the same name ``A_`` — the second
# assignment overwrites the first, and the solver expects the upstream names
# EVEN_DIGITS / ODD_DIGITS; confirm/restore the intended bindings.
A_ : Union[str, Any] =[0, 2, 4, 6, 8]
A_ : Tuple =[1, 3, 5, 7, 9]
def SCREAMING_SNAKE_CASE_(remaining_length, remainder, digits, length) -> int:
    """Count reversible numbers (Project Euler 145) of the given total
    ``length``, filling digit positions from the outside in.

    A number n is reversible when every digit of n + reverse(n) is odd.  The
    recursion fixes the outermost undecided pair of positions in ``digits``
    (mutated as scratch space) and carries the running pair-sum ``remainder``.

    Fixes over the original: the four parameters shared one name (a
    SyntaxError) and the per-position digit assignments were collapsed into
    scalar rebinds; restored.  The digit-parity tables are inlined locally
    because the module-level constants were clobbered by obfuscation.
    """
    even_digits = [0, 2, 4, 6, 8]
    odd_digits = [1, 3, 5, 7, 9]
    if remaining_length == 0:
        # Leading zeros are forbidden on both n and its reverse.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Re-walk the pairs from the middle outward, checking each digit of
        # n + reverse(n) is odd while propagating carries.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        # Odd length: the middle digit is added to itself, so the incoming
        # carry must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += SCREAMING_SNAKE_CASE_(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result
    result = 0
    for digit_hi in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit_hi
        # The partner digit must make the pair-sum odd.
        if (remainder + digit_hi) % 2 == 0:
            other_parity_digits = odd_digits
        else:
            other_parity_digits = even_digits
        for digit_lo in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit_lo
            result += SCREAMING_SNAKE_CASE_(
                remaining_length - 2, (remainder + digit_hi + digit_lo) // 10, digits, length
            )
    return result
def SCREAMING_SNAKE_CASE_(max_power: int = 9) -> int:
    """Project Euler 145: count reversible numbers with up to ``max_power``
    digits (answer for the default 9 digits is 608720).

    Original was broken by obfuscation (undefined ``a_`` call arguments and a
    duplicated helper name); restored.
    """
    result = 0
    for length in range(1, max_power + 1):
        # NOTE(review): the counting helper defined above shares this
        # function's obfuscated name in this file; ``reversible_numbers`` is
        # its conventional upstream name — confirm the binding.
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
    # Script entry point: print the Project Euler 145 answer.
    # NOTE(review): ``solution`` is the conventional name for the function
    # defined above under an obfuscated identifier — confirm the binding.
    print(f'{solution() = }')
| 650 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` (non-negative int) is prime, by trial
    division with every candidate divisor up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Return all primes in [2, n] via the sieve of Eratosthenes (n > 2).

    Multiples are zeroed in place, then the surviving entries are collected.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return every prime in [2, n] by testing each candidate with is_prime()."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list with
    multiplicity (e.g. 12 -> [2, 2, 3]); 0 and 1 yield [0] and [1]."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # integer division keeps 'quotient' an int (original used /=)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True if ``number`` is even. (The original second assert compared
    against the argument instead of ``bool``; fixed.)"""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd. (The original second assert compared
    against the argument instead of ``bool``; fixed.)"""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return a pair of distinct primes whose sum is ``number`` (Goldbach).

    ``number`` must be an even int > 2. Scans all unordered prime pairs and
    stops at the first match; asserts a pair was actually found.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative ints
    (iterative Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (kgV) of two positive ints, built
    from their prime factorizations (max multiplicity per prime)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the (n+1)-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return the primes strictly between the two given primes
    (endpoints excluded); requires p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all divisors of ``n`` (>= 1) in ascending order, including 1 and n."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` (> 1) equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return (numerator, denominator) reduced to lowest terms; denominator != 0."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for n >= 0 (iteratively; factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number under this module's 1-based
    convention: fib(0) == fib(1) == 1, fib(2) == 2, fib(5) == 8."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 55 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase__ ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving (VP) SDE scheduler in the style of Song et al.'s
    score-based generative modeling.

    NOTE(review): the original declared the same mangled name twice as base
    class (a ``TypeError`` at class creation) and gave both methods one name;
    the bases are restored from this file's imports (``SchedulerMixin``,
    ``ConfigMixin``) and the methods renamed ``set_timesteps``/``step_pred``
    per the standard scheduler API — confirm against callers.
    """

    # number of model calls per scheduler step
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1E-3):
        # populated lazily by set_timesteps()
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the continuous time grid, from t=1 down to ``sampling_eps``."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Run one reverse-SDE predictor step: rescale the score by the
        marginal std, follow the drift, then add scaled Gaussian noise.
        Returns ``(x, x_mean)`` — the noised and noise-free updates."""
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"""
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: divide by the marginal std of the VP SDE
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute drift and diffusion of the reverse SDE
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 627 |
import os
def solution():
    """Return the maximum top-to-bottom path sum of the triangle stored in
    triangle.txt next to this script (Project Euler 18/67, bottom-up DP).

    NOTE(review): the original passed an undefined name to ``realpath`` —
    restored to ``__file__`` — and was mangled so the ``__main__`` guard's
    ``solution()`` call could not resolve.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    # parse each line into a row of ints
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # each cell accumulates the best of its two parents (0 past the edges)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
# Print the maximum triangle path sum when run as a script.
if __name__ == "__main__":
    print(solution())
| 55 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker.
SPIECE_UNDERLINE = "▁"

# NOTE(review): the original bound every constant below to the same mangled
# name `__snake_case`; the names are restored from how the tokenizer class
# below references them.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

# Languages covered by the multilingual MuST-C checkpoints.
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """SentencePiece-based Speech2Text tokenizer (vocab.json + .bpe.model).

    NOTE(review): this class is name-mangled and will not run as-is —
    the base `__SCREAMING_SNAKE_CASE` is undefined (presumably
    `PreTrainedTokenizer`, imported above); the five `_a` class attributes
    overwrite each other (they look like `vocab_files_names`,
    `pretrained_vocab_files_map`, `max_model_input_sizes`,
    `model_input_names`, `prefix_tokens`); and all plain methods share the
    name `UpperCAmelCase__`, so `@tgt_lang.setter` raises NameError and only
    the last definition survives. Confirm the intended names upstream.
    """
    _a : Optional[Any] = VOCAB_FILES_NAMES
    _a : Dict = PRETRAINED_VOCAB_FILES_MAP
    _a : Optional[Any] = MAX_MODEL_INPUT_SIZES
    _a : int = ['''input_ids''', '''attention_mask''']
    _a : List[Any] = []
    def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<unk>" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> int:
        # positional args are (vocab_file, spm_file); the flags are
        # do_upper_case / do_lower_case, then tgt_lang, lang_codes,
        # sp_model_kwargs — per the assignments below.
        lowercase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , do_upper_case=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , lang_codes=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
        lowercase__ : Tuple = do_upper_case
        lowercase__ : Optional[Any] = do_lower_case
        # vocab.json maps token -> id; decoder is the inverse mapping
        lowercase__ : Tuple = load_json(lowerCamelCase__ )
        lowercase__ : List[Any] = {v: k for k, v in self.encoder.items()}
        lowercase__ : Dict = spm_file
        lowercase__ : Dict = load_spm(lowerCamelCase__ , self.sp_model_kwargs )
        if lang_codes is not None:
            # multilingual checkpoint: register <lang:xx> tokens and pick a target language
            lowercase__ : Dict = lang_codes
            lowercase__ : str = LANGUAGES[lang_codes]
            lowercase__ : Optional[int] = [F'''<lang:{lang}>''' for lang in self.langs]
            lowercase__ : Any = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
            lowercase__ : str = self.lang_tokens
            lowercase__ : Optional[int] = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            lowercase__ : List[str] = {}
    @property
    def UpperCAmelCase__( self ) -> List[str]:
        # vocab_size: number of entries in vocab.json
        return len(self.encoder )
    @property
    def UpperCAmelCase__( self ) -> Any:
        # tgt_lang getter
        return self._tgt_lang
    @tgt_lang.setter
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> Dict:
        # tgt_lang setter: switching languages refreshes the prefix tokens
        # NOTE(review): `tgt_lang` is undefined here because the property above
        # was emitted under a mangled name — this raises at class creation.
        lowercase__ : Optional[int] = new_tgt_lang
        self.set_tgt_lang_special_tokens(lowerCamelCase__ )
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> Union[str, Any]:
        # set_tgt_lang_special_tokens: prefix every encoded sequence with the language id
        lowercase__ : str = self.lang_code_to_id[tgt_lang]
        lowercase__ : str = [lang_code_id]
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]:
        # _tokenize via SentencePiece
        return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]:
        # _convert_token_to_id, falling back to the unk token's id
        return self.encoder.get(lowerCamelCase__ , self.encoder[self.unk_token] )
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]:
        # _convert_id_to_token, falling back to the unk token
        return self.decoder.get(lowerCamelCase__ , self.unk_token )
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
        # convert_tokens_to_string: decode runs of sentencepiece tokens,
        # passing special tokens through verbatim
        lowercase__ : Optional[Any] = []
        lowercase__ : List[str] = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                lowercase__ : Optional[Any] = self.sp_model.decode(lowerCamelCase__ )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                lowercase__ : Optional[Any] = []
            else:
                current_sub_tokens.append(lowerCamelCase__ )
        lowercase__ : Any = self.sp_model.decode(lowerCamelCase__ )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Any:
        # build_inputs_with_special_tokens: lang prefix + ids + </s>
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> Union[str, Any]:
        # get_special_tokens_mask: 1 for prefix/eos, 0 for sequence tokens
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
        lowercase__ : List[Any] = [1] * len(self.prefix_tokens )
        lowercase__ : int = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
        return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
    def UpperCAmelCase__( self ) -> List[Any]:
        # get_vocab: base vocab plus added tokens
        lowercase__ : str = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Optional[Any]:
        # drop the unpicklable SentencePiece processor before pickling
        lowercase__ : Any = self.__dict__.copy()
        lowercase__ : Optional[int] = None
        return state
    def __setstate__( self , lowerCamelCase__ ) -> Union[str, Any]:
        lowercase__ : Optional[Any] = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            lowercase__ : str = {}
        # reload the SentencePiece processor that __getstate__ dropped
        lowercase__ : List[str] = load_spm(self.spm_file , self.sp_model_kwargs )
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple:
        # save_vocabulary: write vocab.json and copy/serialize the spm model
        lowercase__ : Optional[int] = Path(lowerCamelCase__ )
        assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
        lowercase__ : str = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        lowercase__ : List[Any] = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , lowerCamelCase__ )
        if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , lowerCamelCase__ )
        elif not os.path.isfile(self.spm_file ):
            with open(lowerCamelCase__ , """wb""" ) as fi:
                lowercase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(lowerCamelCase__ )
        return (str(lowerCamelCase__ ), str(lowerCamelCase__ ))
def load_spm(path, sp_model_kwargs):
    """Construct a SentencePieceProcessor with ``sp_model_kwargs`` and load
    the model file at ``path``. (The original had two parameters with the
    same mangled name — a SyntaxError — and read undefined ``a_``.)"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path):
    """Deserialize and return the JSON document stored at ``path``."""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path):
    """Serialize ``data`` as 2-space-indented JSON to the file at ``path``."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 200 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE :Union[str, Any] = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE :List[str] = object()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
__A = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(a_ ) - len(a_ ) + 1 ):
__A = [x.match(a_ ) for x, y in zip(a_ , ks[i:] )]
if matches and all(a_ ):
return True
return False
def _replacement_rules(rules):
    """Return ``replace(key, val)`` mapping a flattened parameter key to the
    replacement of the first matching rule, or ``val`` when none match."""
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    """Return the (key-pattern, PartitionSpec) rules for sharding a GPT-style
    transformer across the 'mp' (model-parallel) mesh axis."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every leaf of ``in_dict`` using the rules
    above and return the result as a frozen nested dict; asserts that every
    parameter matched some rule."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every flattened key at the _unmatched sentinel, then apply rules.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 55 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 60 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds DeiT configs/dummy inputs and runs output-shape checks for the
    TF DeiT test suite. (Restored name: the test class below instantiates
    ``TFDeiTModelTester``, which the mangled original left undefined; the
    methods are renamed per their call sites.)"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API test suite for the TF DeiT model family.

    NOTE(review): the mangled original listed the same undefined name twice as
    base class (duplicate-base TypeError) and repeated a parameter name in
    ``_prepare_for_class`` (SyntaxError); bases restored from this file's
    imports and methods renamed per the standard test-suite API.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # teacher-distilled heads take no `labels` argument — drop it
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(A )
def prepare_img():
    """Load the standard COCO fixture image used by the integration test
    (called as ``prepare_img`` below; the original name left it undefined)."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of the distilled DeiT classifier against known
    logits. (Renamed: the original reused an earlier class's mangled name,
    clobbering it, and ``self.default_image_processor`` could not resolve.)"""

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 55 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# SentencePiece word-boundary marker character.
# NOTE(review): name chosen by convention; this constant is not referenced in
# the visible chunk — confirm against the upstream tokenizer module.
SPIECE_UNDERLINE = '▁'

# Filenames expected on disk / on the Hub for this tokenizer.
# (The original assigned all of these constants to the single name
# `__UpperCAmelCase`; the class body below references the names restored here.)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}

# Maximum model input sizes, keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
__UpperCAmelCase = logging.get_logger(__name__)
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    # PEGASUS SentencePiece tokenizer (google/pegasus-xsum style): ids 0/1 are
    # pad/eos, 2..offset+1 are mask/unk_* specials, and SentencePiece ids are
    # shifted up by `offset`.
    # NOTE(review): heavy obfuscation damage — the base class is a placeholder,
    # all five class attributes below collide on the mangled name
    # `__lowerCamelCase`, `__init__` declares duplicate `a_` parameters (a
    # SyntaxError), and most locals/attribute writes (`a__ : ... = value`) lost
    # their real targets. Restore from the upstream tokenizer before use.
    __lowerCamelCase : Tuple = VOCAB_FILES_NAMES
    __lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
    __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase : List[str] = ["input_ids", "attention_mask"]
    def __init__( self : Tuple , a_ : Union[str, Any] , a_ : List[str]="<pad>" , a_ : Dict="</s>" , a_ : Union[str, Any]="<unk>" , a_ : List[str]="<mask_2>" , a_ : Optional[int]="<mask_1>" , a_ : Union[str, Any]=None , a_ : Optional[Any]=1_03 , a_ : Optional[Dict[str, Any]] = None , **a_ : Dict , ) -> str:
        '''simple docstring'''
        # Validates/extends additional_special_tokens with <unk_i> fillers, loads
        # the SentencePiece model and builds the special-token encoder/decoder maps.
        a__ : str = offset
        if additional_special_tokens is not None:
            if not isinstance(a_ , a_ ):
                raise TypeError(
                    F"additional_special_tokens should be of type {type(a_ )}, but is"
                    F" {type(a_ )}" )
            a__ : Optional[Any] = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"<unk_{i}>" for i in range(len(a_ ) , self.offset - 1 )
            ]
            if len(set(a_ ) ) != len(a_ ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
            a__ : str = additional_special_tokens_extended
        else:
            a__ : Dict = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
        a__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=a_ , unk_token=a_ , mask_token=a_ , pad_token=a_ , mask_token_sent=a_ , offset=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        a__ : str = mask_token_sent
        a__ : Tuple = vocab_file
        a__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )
        # add special tokens to encoder dict
        a__ : Dict = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        a__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
    @property
    def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        # vocab_size: SentencePiece vocabulary plus the reserved special-id offset.
        return len(self.sp_model ) + self.offset
    def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        '''simple docstring'''
        # get_vocab: token -> id mapping including added tokens.
        a__ : Tuple = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Tuple ) -> Optional[Any]:
        '''simple docstring'''
        # Pickling: the SentencePieceProcessor itself is not picklable, drop it.
        a__ : Any = self.__dict__.copy()
        a__ : List[Any] = None
        return state
    def __setstate__( self : int , a_ : Optional[int] ) -> Union[str, Any]:
        '''simple docstring'''
        # Unpickling: rebuild the SentencePieceProcessor from the saved vocab file.
        a__ : str = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            a__ : Optional[int] = {}
        a__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def UpperCAmelCase ( self : int , a_ : str ) -> Dict:
        '''simple docstring'''
        # _tokenize: delegate to SentencePiece.
        return self.sp_model.encode(a_ , out_type=a_ )
    def UpperCAmelCase ( self : Optional[int] , a_ : str ) -> str:
        '''simple docstring'''
        # _convert_token_to_id: specials first, then SentencePiece id + offset.
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        a__ : Optional[Any] = self.sp_model.piece_to_id(a_ )
        return sp_id + self.offset
    def UpperCAmelCase ( self : int , a_ : int ) -> List[Any]:
        '''simple docstring'''
        # _convert_id_to_token: inverse of the above.
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            a__ : Optional[int] = self.sp_model.IdToPiece(index - self.offset )
            return token
    def UpperCAmelCase ( self : Optional[int] , a_ : str ) -> str:
        '''simple docstring'''
        # convert_tokens_to_string: decode runs of SP pieces, passing special
        # tokens through verbatim.
        a__ : Dict = []
        a__ : int = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(a_ ) + token
                a__ : Dict = []
            else:
                current_sub_tokens.append(a_ )
        out_string += self.sp_model.decode(a_ )
        return out_string.strip()
    def UpperCAmelCase ( self : Union[str, Any] , a_ : Optional[Any]=False ) -> List[Any]:
        '''simple docstring'''
        # num_special_tokens_to_add: PEGASUS appends a single EOS.
        return 1
    def UpperCAmelCase ( self : Optional[Any] , a_ : int ) -> int:
        '''simple docstring'''
        # _special_token_mask: 1 for special ids (except <unk>), else 0.
        a__ : List[Any] = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def UpperCAmelCase ( self : Optional[Any] , a_ : List , a_ : Optional[List] = None , a_ : bool = False ) -> Optional[int]:
        '''simple docstring'''
        # get_special_tokens_mask: trailing [1] accounts for the appended EOS.
        if already_has_special_tokens:
            return self._special_token_mask(a_ )
        elif token_ids_a is None:
            return self._special_token_mask(a_ ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
    def UpperCAmelCase ( self : List[str] , a_ : Any , a_ : Union[str, Any]=None ) -> Optional[Any]:
        '''simple docstring'''
        # build_inputs_with_special_tokens: sequence(s) + EOS.
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]
    def UpperCAmelCase ( self : Tuple , a_ : str , a_ : Optional[str] = None ) -> Dict:
        '''simple docstring'''
        # save_vocabulary: copy the .model file (or re-serialize it) into save_directory.
        if not os.path.isdir(a_ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        a__ : Union[str, Any] = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                a__ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 642 |
# Conway Doomsday-rule month anchors: the day-of-month in each month that falls
# on the year's "doomsday". Leap years differ from common years only in
# January and February. (The original assigned all three constants to the
# single name SCREAMING_SNAKE_CASE; the function below references these names.)
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Weekday index -> name; 0 is Sunday in this implementation.
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
def UpperCAmelCase(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date via Conway's Doomsday rule.

    Fixes in this revision:
    - the signature declared three parameters all named ``a_`` (a SyntaxError)
      and every local was mangled to ``__A``, leaving the body unrunnable;
    - the leap-year test marked century years divisible by 400 (e.g. 2000) as
      NON-leap; the correct common-year rule is "not divisible by 4, or a
      century year not divisible by 400".

    The anchor tables are inlined so the function is self-contained.
    Raises AssertionError on out-of-range input (kept from the original;
    note asserts are stripped under ``python -O``).
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Month doomsday anchors; only January/February differ between leap and
    # common years. Weekday 0 is Sunday.
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: 'Sunday',
        1: 'Monday',
        2: 'Tuesday',
        3: 'Wednesday',
        4: 'Thursday',
        5: 'Friday',
        6: 'Saturday',
    }
    # Doomsday algorithm: century anchor, then the year-in-century adjustment.
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100  # two-digit year within the century
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Common (non-leap) year iff not divisible by 4, or a century year (year
    # % 100 == 0, i.e. centurian == 0) not divisible by 400.
    is_common_year = (year % 4 != 0) or (centurian == 0 and year % 400 != 0)
    day_anchor = (
        doomsday_not_leap[month - 1] if is_common_year else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 55 | 0 |
"""simple docstring"""
def __magic_name__(number: int) -> bool:
    """Return True iff *number* is automorphic (its square ends in the number).

    E.g. 76 is automorphic because 76**2 == 5776 ends in "76".
    Fix: the parameter was renamed to ``number`` — the original declared
    ``__snake_case`` but the body referenced ``number`` (NameError) and the
    type check / error message used the dangling placeholder ``a_``.

    Raises TypeError for non-int input; negative numbers return False.
    """
    if not isinstance(number, int):
        lowercase: List[str] = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(lowercase)
    if number < 0:
        return False
    number_square = number * number
    # Compare the numbers digit by digit from the right.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 361 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase(olid: str = "isbn/0140328726") -> dict:
    """Fetch the raw Open Library JSON record for *olid* (e.g. "isbn/...").

    Fixes: the parameter was restored to ``olid`` (the body referenced it but
    the signature declared ``a_``), the cleaned value is bound to ``new_olid``
    (previously lost to the mangled name ``__A``), and the ValueError is
    raised with the prepared message instead of the dangling placeholder.

    Raises ValueError when *olid* is not of the form "<kind>/<id>".
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = F'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg)
    # Network call: returns the decoded JSON document for the record.
    return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def UpperCAmelCase ( a_ ) -> dict:
    """simple docstring"""
    # Summarize an Open Library book record: keep a fixed set of fields,
    # resolve author keys to names, and flatten list values to comma-joined
    # strings.
    # NOTE(review): locals are mangled to `__A` (`desired_keys`/`data` are
    # dangling), and the call to `get_openlibrary_data` is unresolved — in this
    # chunk that helper was renamed to `UpperCAmelCase`, which this very
    # definition shadows. Restore names from the upstream script before use.
    __A = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    __A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    __A = [
        get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
    ]
    __A = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(a_ , a_ ):
            __A = ", ".join(a_ )
    return data
if __name__ == "__main__":
    # Interactive loop: prompt for ISBNs, look each one up on Open Library and
    # print a human-readable summary; blank / quit-words exit.
    # NOTE(review): `summarize_book` / `get_openlibrary_data` are dangling —
    # both functions above were renamed to `UpperCAmelCase` by obfuscation.
    while True:
        SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
# Root of the package whose __init__.py files are checked.
# (The original assigned every constant below to the single name `lowercase`;
# the names restored here are exactly those referenced by the functions that
# follow — find_backend, parse_init, check_submodules.)
PATH_TO_TRANSFORMERS = 'src/transformers'

# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(R"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(R"""^\s*else:""")
def _snake_case(SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
    """Return the backend key ("x" or "x_and_y") for an `if not is_x_available()` line.

    Returns None when the line is not a backend guard. Fix: the body now uses
    the actual parameter and a named local — the original referenced the
    dangling placeholders ``a_`` and ``backends`` (NameError at runtime).
    """
    if _re_test_backend.search(SCREAMING_SNAKE_CASE__) is None:
        return None
    # findall yields (backend, "") tuples because of the trailing empty group.
    backends = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE__)]
    backends.sort()
    return "_and_".join(backends)
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
    # Parse a package __init__.py and return two dicts mapping backend name
    # ("none" for unconditional) to the list of exported object names: one for
    # the `_import_structure` half, one for the TYPE_CHECKING half. Returns
    # None for inits without an `_import_structure`.
    # NOTE(review): every local was mangled to `lowercase` — `lines`,
    # `line_index`, `objects`, `line`, `imports`, `single_line_import_search`,
    # `backend`, `import_dict_objects`, `type_hint_objects` are all dangling
    # references. Restore names from the upstream check_inits utility.
    with open(a_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowercase : Tuple = f.readlines()
    lowercase : Any = 0
    while line_index < len(a_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(a_ ):
        return None
    # First grab the objects without a specific backend in _import_structure
    lowercase : List[Any] = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        lowercase : str = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(a_ ):
            lowercase : Any = _re_one_line_import_struct.search(a_ ).groups()[0]
            lowercase : Any = re.findall("""\[([^\]]+)\]""" , a_ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        lowercase : Union[str, Any] = _re_import_struct_key_value.search(a_ )
        if single_line_import_search is not None:
            lowercase : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(a_ ) > 0]
            objects.extend(a_ )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1
    lowercase : str = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        lowercase : Optional[int] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            lowercase : Union[str, Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            lowercase : Optional[int] = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                lowercase : Tuple = lines[line_index]
                if _re_import_struct_add_one.search(a_ ) is not None:
                    objects.append(_re_import_struct_add_one.search(a_ ).groups()[0] )
                elif _re_import_struct_add_many.search(a_ ) is not None:
                    lowercase : List[Any] = _re_import_struct_add_many.search(a_ ).groups()[0].split(""", """ )
                    lowercase : List[str] = [obj[1:-1] for obj in imports if len(a_ ) > 0]
                    objects.extend(a_ )
                elif _re_between_brackets.search(a_ ) is not None:
                    lowercase : int = _re_between_brackets.search(a_ ).groups()[0].split(""", """ )
                    lowercase : str = [obj[1:-1] for obj in imports if len(a_ ) > 0]
                    objects.extend(a_ )
                elif _re_quote_object.search(a_ ) is not None:
                    objects.append(_re_quote_object.search(a_ ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1
            lowercase : Union[str, Any] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    lowercase : List[Any] = []
    while (
        line_index < len(a_ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        lowercase : Optional[Any] = lines[line_index]
        lowercase : List[str] = _re_import.search(a_ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    lowercase : Tuple = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(a_ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        lowercase : Optional[int] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            lowercase : int = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            lowercase : Any = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                lowercase : Tuple = lines[line_index]
                lowercase : Dict = _re_import.search(a_ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            lowercase : str = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def _snake_case(import_dict_objects, type_hint_objects) -> list:
    """Compare the two halves of a parsed __init__ and return error strings.

    *import_dict_objects* / *type_hint_objects* map a backend key ("none" for
    unconditional) to the list of exported names. Fix: the original signature
    declared two parameters both named ``SCREAMING_SNAKE_CASE__`` (a
    SyntaxError); the restored names are the ones the body already used.
    The return annotation is corrected to ``list`` (a list of messages).
    """
    def find_duplicates(seq):
        # Names appearing more than once within one half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        # Same set of names must appear on both sides for each backend.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def _snake_case( ) -> str:
    # Walk the source tree, parse every __init__.py, and raise ValueError with
    # all inconsistencies between _import_structure and TYPE_CHECKING halves.
    # NOTE(review): locals mangled to `lowercase`; `a_`, `objects`, `errors`,
    # `failures`, `fname` are dangling, and `parse_init`/`analyze_results` were
    # renamed to `_snake_case` by obfuscation. Restore before use.
    lowercase : int = []
    for root, _, files in os.walk(a_ ):
        if "__init__.py" in files:
            lowercase : Any = os.path.join(a_ , """__init__.py""" )
            lowercase : Optional[Any] = parse_init(a_ )
            if objects is not None:
                lowercase : Union[str, Any] = analyze_results(*a_ )
                if len(a_ ) > 0:
                    lowercase : Any = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("""\n""".join(a_ ) )
    if len(a_ ) > 0:
        raise ValueError("""\n\n""".join(a_ ) )
def _snake_case( ) -> List[str]:
    # Enumerate the package's direct submodules: non-private subfolders that
    # contain Python files, plus top-level .py files (minus __init__.py).
    # NOTE(review): locals mangled to `lowercase`; `a_` (the walked root,
    # presumably PATH_TO_TRANSFORMERS — confirm), `submodules`, `short_path`
    # and `submodule` are dangling references. Restore before use.
    lowercase : Dict = []
    for path, directories, files in os.walk(a_ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(a_ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(a_ ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            lowercase : Dict = str((Path(a_ ) / folder).relative_to(a_ ) )
            lowercase : Optional[Any] = short_path.replace(os.path.sep , """.""" )
            submodules.append(a_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            lowercase : List[str] = str((Path(a_ ) / fname).relative_to(a_ ) )
            lowercase : List[Any] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(a_ )
    return submodules
# Submodules intentionally absent from the main init's `_import_structure`
# (referenced by check_submodules below; the original assignment target was
# lost to the obfuscated name `lowercase`).
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]
def _snake_case( ) -> Optional[int]:
    # Import the package from source and verify every discovered submodule is
    # registered in its main `_import_structure`; raise ValueError otherwise.
    # NOTE(review): locals mangled to `lowercase`; `a_`, `spec`, `transformers`,
    # `module_not_registered` are dangling, and `get_transformers_submodules`
    # was renamed to `_snake_case` by obfuscation. Restore before use.
    lowercase : int = importlib.util.spec_from_file_location(
        """transformers""" , os.path.join(a_ , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    lowercase : Dict = spec.loader.load_module()
    lowercase : int = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(a_ ) > 0:
        lowercase : Optional[Any] = """\n""".join(f"- {module}" for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            f"{list_of_modules}\n"
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
    # Run both consistency checks when executed as a script.
    check_all_inits()
    check_submodules()
| 336 |
import requests
# Giphy API key placeholder; replace with a real key from developers.giphy.com.
# (Restored name: the function default below referenced `giphy_api_key`, but
# the original assignment target was lost to the obfuscated name
# SCREAMING_SNAKE_CASE.)
giphy_api_key = 'YOUR API KEY'


def UpperCAmelCase(query: str, api_key: str = giphy_api_key) -> list:
    """Return the URLs of GIFs matching *query* via the Giphy search API.

    Fixes: the original signature declared two parameters both named ``a_``
    (a SyntaxError) while the body used ``query``/``api_key``; locals were
    mangled to ``__A``, leaving ``formatted_query``/``url``/``gifs`` dangling.
    """
    formatted_query = "+".join(query.split())
    url = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    # Network call: Giphy returns the matches under the "data" key.
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    # The original called the dangling name `get_gifs`; this module's search
    # function is named UpperCAmelCase, so call it directly.
    print('\n'.join(UpperCAmelCase('space ship')))
| 55 | 0 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
    """Thin LightningModule wrapper: a Longformer encoder plus a 2-way QA head.

    Fix: the original ``__init__`` assigned the model / label count / head to
    mangled locals (``a_ : Dict = model``), never setting them on ``self`` —
    yet the conversion function below reads ``lightning_model.model`` and
    ``lightning_model.qa_outputs``. The attributes are now set properly.
    """

    def __init__( self , lowerCAmelCase_ ):
        '''simple docstring'''
        super().__init__()
        # Wrapped LongformerModel; its weights are what the checkpoint stores.
        self.model = lowerCAmelCase_
        # Start/end logits for extractive QA.
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        # Intentionally empty: this wrapper is only used to load a checkpoint,
        # never to train or run a forward pass.
        pass
def _snake_case(longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str):
    """Convert a PyTorch-Lightning Longformer-QA checkpoint to a HF model dir.

    Fix: the original declared three parameters all named ``A_`` (a
    SyntaxError); the restored names match the argparse options passed at the
    call site. Locals were likewise re-bound instead of being lost to the
    mangled ``a_`` names.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("""cpu"""))
    lightning_model.load_state_dict(ckpt["""state_dict"""])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    # NOTE(review): `convert_longformer_qa_checkpoint_to_pytorch` is dangling —
    # the conversion function above was renamed to `_snake_case` by obfuscation.
    __snake_case: List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __snake_case: Optional[Any] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 577 |
import itertools
import math
def UpperCAmelCase(number: int) -> bool:
    """Return True iff *number* is prime (trial division, 6k±1 optimization).

    Fix: the parameter was restored to ``number`` — the original signature
    declared ``a_`` while the body referenced ``number`` (NameError).
    Negative numbers, 0 and 1 return False.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def UpperCAmelCase ( ) -> Optional[Any]:
    """Yield the primes 2, 3, 5, 7, ... indefinitely, in increasing order.

    Fix: the counter is bound to ``num`` — the original assigned the mangled
    name ``__A`` and then referenced the undefined ``num``/``a_``.
    NOTE(review): relies on an ``is_prime`` helper; in this file the primality
    test above was renamed to ``UpperCAmelCase`` by obfuscation — restore its
    name for this generator to resolve.
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def UpperCAmelCase(nth: int = 10_001) -> int:
    """Return the *nth* prime (1-indexed) — Project Euler problem 7.

    Fixes: the parameter was restored to ``nth`` (the body referenced it but
    the signature declared ``a_``), and the ``islice`` stop bound is ``nth``
    instead of the dangling placeholder — ``islice(primes, nth - 1, nth)``
    yields exactly the nth element.
    NOTE(review): ``prime_generator`` is dangling — the generator above was
    renamed to ``UpperCAmelCase`` by obfuscation; restore its name.
    """
    return next(itertools.islice(prime_generator(), nth - 1, nth))
# NOTE(review): `solution` is dangling — the function above was renamed to
# `UpperCAmelCase` by obfuscation.
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 55 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__snake_case : Union[str, Any] = object()
# For specifying empty leaf dict `{}`
__snake_case : List[str] = object()
def a_(qs, ks):
    """Return True if the regex sequence *qs* fully matches a window of *ks*.

    Each pattern in *qs* is anchored with a trailing ``$`` and matched against
    consecutive elements of *ks* at every starting offset. Fix: the original
    declared two parameters both named ``__a`` (a SyntaxError); the restored
    names are the ones the body already referenced.
    """
    qts = tuple(re.compile(x + '''$''') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        window = [x.match(y) for x, y in zip(qts, ks[i:])]
        if window and all(window):
            return True
    return False
def a_(__a):
    """Build ``replace(key, val)``: the replacement of the first rule matching
    *key*, or *val* unchanged when no rule matches.

    *__a* is a sequence of ``(rule, replacement)`` pairs; matching is
    delegated to the ``_match`` helper. Fix: the original inner function
    declared two parameters both named ``__a`` (a SyntaxError) and referenced
    the dangling names ``rules``/``val``; argument order (key first) follows
    the dict comprehension in the caller below.
    """
    rules = __a

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def a_ ( ):
    # Partition-spec rules for a GPT-style transformer: maps parameter-name
    # patterns to jax PartitionSpec values (model-parallel axis "mp") or None
    # (replicated).
    # NOTE(review): the bare `a_` arguments below are dangling placeholders —
    # presumably `None` (replicated axis) in the upstream rules; confirm
    # against the original partitioning module before use.
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , a_ )),
        (("transformer", "wte", "embedding"), P('''mp''' , a_ )),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(a_ , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , a_ )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(a_ , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , a_ )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def a_ ( __a ):
    # Assign a PartitionSpec to every leaf of the parameter pytree `__a` using
    # the rules above, asserting that no leaf is left unmatched, and return a
    # frozen dict.
    # NOTE(review): locals mangled to `A__`; `_get_partition_rules`,
    # `_replacement_rules`, `replace`, `initd` and `result` are dangling (the
    # helper functions above were all renamed to `a_`). Restore before use.
    A__ = _get_partition_rules()
    A__ = _replacement_rules(a_ )
    A__ = {k: _unmatched for k in flatten_dict(a_ )}
    A__ = {k: replace(a_ , a_ ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(a_ ) )
| 571 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase(model, ckpt_dir, model_name):
    """Export a PyTorch BERT *model* as a TensorFlow 1.x checkpoint.

    Fix: the original declared three parameters all named ``a_`` (a
    SyntaxError); the restored names (``model``, ``ckpt_dir``, ``model_name``)
    come from the keyword call in ``main`` below. Locals lost to the mangled
    ``__A`` names are likewise re-bound; the meaningless ``-> List[str]``
    annotation is dropped (the function returns None).

    The checkpoint is written to ``ckpt_dir/<model_name with - -> _>.ckpt``.
    """
    # Variables whose kernels must be transposed between PT and TF layouts.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # PT state-dict name -> TF variable name rewrite rules, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every rewrite rule, then namespace under "bert/".
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F'''bert/{name}'''

    def create_tf_var(tensor, name, session):
        # Create and zero-initialize a TF variable shaped like `tensor`.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def UpperCAmelCase(raw_args=None):
    """CLI entry point: load a PyTorch BERT model and export it as a TF checkpoint.

    *raw_args* defaults to None so argparse falls back to ``sys.argv`` —
    backward-compatible with the original ``a_=None`` signature. Fix: locals
    (``parser``, ``args``, ``model``) were mangled to ``__A``, leaving the
    later references dangling; argparse option values lost to the ``a_``
    placeholders are restored from the help strings (required flags) —
    ``--cache_dir`` is optional with default None (TODO confirm upstream).
    NOTE(review): ``convert_pytorch_checkpoint_to_tf`` is dangling — the
    converter above was renamed to ``UpperCAmelCase`` by obfuscation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
# NOTE(review): `main` is dangling — the CLI function above was renamed to
# `UpperCAmelCase` by obfuscation.
if __name__ == "__main__":
    main()
| 55 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __lowerCamelCase :
    '''simple docstring'''
    # Model-tester helper for ESMFold: builds a tiny EsmConfig (2 trunk
    # blocks) and random inputs, and checks the folding head's output shapes.
    # NOTE(review): the test suite below instantiates this as
    # `EsmFoldModelTester` — the class name was lost to obfuscation. Locals in
    # the methods are mangled to `__SCREAMING_SNAKE_CASE`, leaving
    # `input_ids`/`input_mask`/`config`/`result` etc. dangling; restore names
    # from the upstream test module before running.
    def __init__( self , a__ , a__=13 , a__=7 , a__=False , a__=True , a__=False , a__=False , a__=19 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ):
        # NOTE(review): positional parameters all named `a__` collide — this
        # signature is a SyntaxError as written; the attribute names below
        # document the intended argument order.
        __SCREAMING_SNAKE_CASE : List[str] = parent
        __SCREAMING_SNAKE_CASE : Optional[int] = batch_size
        __SCREAMING_SNAKE_CASE : Optional[Any] = seq_length
        __SCREAMING_SNAKE_CASE : Optional[int] = is_training
        __SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
        __SCREAMING_SNAKE_CASE : int = use_token_type_ids
        __SCREAMING_SNAKE_CASE : Optional[int] = use_labels
        __SCREAMING_SNAKE_CASE : Any = vocab_size
        __SCREAMING_SNAKE_CASE : Tuple = hidden_size
        __SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
        __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
        __SCREAMING_SNAKE_CASE : int = intermediate_size
        __SCREAMING_SNAKE_CASE : List[str] = hidden_act
        __SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
        __SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
        __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
        __SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
        __SCREAMING_SNAKE_CASE : int = type_sequence_label_size
        __SCREAMING_SNAKE_CASE : int = initializer_range
        __SCREAMING_SNAKE_CASE : Tuple = num_labels
        __SCREAMING_SNAKE_CASE : List[Any] = num_choices
        __SCREAMING_SNAKE_CASE : Optional[int] = scope
    def a_ ( self ):
        # prepare_config_and_inputs: random token ids, optional attention mask
        # and (unused here) labels, plus a small config.
        __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __SCREAMING_SNAKE_CASE : List[Any] = None
        if self.use_input_mask:
            __SCREAMING_SNAKE_CASE : int = random_attention_mask([self.batch_size, self.seq_length] )
        __SCREAMING_SNAKE_CASE : Optional[int] = None
        __SCREAMING_SNAKE_CASE : List[Any] = None
        __SCREAMING_SNAKE_CASE : Optional[Any] = None
        if self.use_labels:
            __SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        __SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def a_ ( self ):
        # get_config: tiny folding-enabled EsmConfig (vocab 33, 2 trunk blocks).
        # NOTE(review): `is_folding_model=a_` is a dangling placeholder —
        # presumably True here; confirm upstream.
        __SCREAMING_SNAKE_CASE : int = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=a_ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
        return config
    def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
        # create_and_check_model: run the folding model (with/without mask) and
        # verify positions (8, B, L, 14, 3) and angles (8, B, L, 7, 2) shapes.
        __SCREAMING_SNAKE_CASE : int = EsmForProteinFolding(config=a__ ).float()
        model.to(a__ )
        model.eval()
        __SCREAMING_SNAKE_CASE : str = model(a__ , attention_mask=a__ )
        __SCREAMING_SNAKE_CASE : str = model(a__ )
        __SCREAMING_SNAKE_CASE : int = model(a__ )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
    def a_ ( self ):
        # prepare_config_and_inputs_for_common: repack as (config, inputs_dict).
        __SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
        (
            (
                __SCREAMING_SNAKE_CASE
            ) , (
                __SCREAMING_SNAKE_CASE
            ) , (
                __SCREAMING_SNAKE_CASE
            ) , (
                __SCREAMING_SNAKE_CASE
            ) , (
                __SCREAMING_SNAKE_CASE
            ) , (
                __SCREAMING_SNAKE_CASE
            ) ,
        ) : Union[str, Any] = config_and_inputs
        __SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCamelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test harness for EsmForProteinFolding.

    Fixes vs. original: the class listed the same undefined placeholder
    twice as a base (a TypeError at class creation — restored to the usual
    ModelTesterMixin / PipelineTesterMixin pair; NOTE(review): confirm these
    are imported at file top); all five class attributes shared one name so
    only the last survived; every method was named ``a_`` so only the last
    def survived and unittest would discover none of them; ``config_class``
    and ``create_and_check_model`` arguments referenced an undefined
    ``a__``. Skipped-test names are reconstructed from the upstream ESMFold
    test suite to match their skip reasons.
    """

    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_states(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """Slow end-to-end check of the public facebook/esmfold_v1 checkpoint.

    Fixes vs. original: the class reused the previous test class's
    placeholder name (shadowing it at module level) and an undefined
    placeholder base class — NOTE(review): confirm TestCasePlus is imported
    at file top; every local was bound to the same throwaway name, so
    ``position_outputs`` was undefined at the assertion; ``torch.floataa``
    does not exist (restored to ``torch.float32``).
    """

    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 211 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure consumed by _LazyModule: module name -> public symbols.
# The configuration is always importable; modeling objects require torch.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    # Fixes vs. original: the structure dict was bound to a throwaway name
    # while _LazyModule read an undefined `_import_structure`, and the proxy
    # was never installed into sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case : List[str] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 545 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup  # fixed: was `from bsa import ...`, a nonexistent module

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Scrape Indeed search results for mobile-app-development jobs.

    Yields (job_title, company_name) pairs for each organic job card on the
    first results page for *location*.

    Fixes vs. original: the function and the base-URL constant had
    placeholder names, so the body's `url` and the `fetch_jobs` call under
    the main guard were both NameErrors.
    """
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # Each organic (non-sponsored) job posting is a div tagged with this attribute.
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 55 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (old key prefix, new key prefix) pairs, applied in order when translating
# checkpoint keys. Name restored: the conversion helpers below reference
# `rename_keys_prefix`, which the original bound to a placeholder.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Checkpoint filenames this conversion script knows how to handle
# (referenced as ACCEPTABLE_CHECKPOINTS by the conversion function).
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path: str):
    """Load a VisualBERT checkpoint's state dict onto CPU.

    Fixes vs. original: the body passed an undefined name to ``torch.load``
    instead of the parameter, and the def name did not match the
    ``load_state_dict(...)`` call site below.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Translate an old VisualBERT checkpoint state dict to HF key names.

    Fixes vs. original: all three parameters were named identically (a
    SyntaxError), the position-ids tensor and the decoder-bias copy were
    bound to throwaway names instead of stored under their keys, and the
    def name did not match the ``get_new_dict(...)`` call site.
    """
    new_d = OrderedDict()
    # HF models carry a persistent position_ids buffer that old checkpoints lack.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # Detector weights are intentionally dropped from the converted model.
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint to HF format.

    Chooses the config and task head from the checkpoint filename, rewrites
    the state-dict keys, loads them into the matching HF model class and
    saves it with ``save_pretrained``.

    Fixes vs. original: both parameters were named identically (a
    SyntaxError), every local was bound to one throwaway name, and the def
    name did not match the call under ``__main__``.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1_024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048, "num_labels": 3_129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1_024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
A_ : Optional[int] =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 650 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase(unittest.TestCase):
    """Tests for the BLIP-2 processor (image processor + GPT-2 tokenizer).

    Fixes vs. original: every method shared one placeholder name, so only
    the last def survived and unittest discovered no tests; locals were
    bound to ``__A`` and then read by their real names (NameError);
    ``np.uinta`` does not exist (restored to ``np.uint8``); several keyword
    arguments referenced an undefined ``A`` (restored to the values the
    assertions below require — ``False`` for the do_normalize /
    return_token_type_ids flags, ``ValueError`` for the empty-call check).
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moved to channels-last)."""
        image_inputs = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 55 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase__:
    """Mixin of shared checks for diffusers UNet down/mid/up blocks.

    Subclasses are expected to provide ``block_class`` and ``block_type``
    ("down", "mid" or "up"). Fixes vs. original: every method and property
    shared a placeholder name so only the last def survived; locals were
    bound to one throwaway name and then read by their real names
    (NameError). Method/property names are restored to match the
    self-references visible in the bodies (``dummy_input``,
    ``output_shape``, ``get_dummy_input``,
    ``prepare_init_args_and_inputs_for_common``); the two test names are
    reconstructed from the upstream diffusers mixin.
    """

    @property
    def dummy_input(self):
        """Default dummy inputs (hidden states + time embedding)."""
        return self.get_dummy_input()

    @property
    def output_shape(self):
        """Expected output shape per block type (batch, channels, h, w)."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a seeded dict of inputs for a block's forward pass."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            # Separate seed so the residual states differ from hidden_states.
            res_generator = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (
                randn_tensor(shape, generator=res_generator, device=device),
            )

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward kwargs) for the block under test."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # Up blocks additionally take the previous stage's output channels.
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5E-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 627 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that packs tokenized texts into fixed-length chunks.

    Texts are buffered until roughly ``seq_length * chars_per_token *
    num_of_sequences`` characters are collected, tokenized in one batch,
    joined with the BOS token as separator, and yielded as tensors of
    exactly ``seq_length`` token ids (the trailing remainder is dropped).

    Fixes vs. original: the class name and base were placeholders (the call
    site below constructs ``ConstantLengthDataset`` over ``IterableDataset``
    from the imports), and ``__iter__`` called an undefined ``tokenizer``
    with an undefined argument instead of ``self.tokenizer(buffer,
    truncation=False)``.
    """

    def __init__(self, tokenizer, dataset, seq_length=10_24, num_of_sequences=10_24, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # BOS id is used as the separator between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # chars_per_token is an estimate used to size the character buffer.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """Build the streaming evaluation DataLoader.

    Fixes vs. original: ``load_dataset`` spread the args namespace instead
    of the ``{"streaming": True}`` kwargs, the dataset constructor received
    ``args`` twice instead of the module-level tokenizer and the loaded
    split, and the def name did not match the ``create_dataloader(args)``
    call below.

    NOTE(review): relies on the module-level ``tokenizer`` created in the
    script body — confirm after the global renames are applied.
    """
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    """Run one evaluation pass; return (mean loss, perplexity).

    Fixes vs. original: the loop iterated/forwarded the args namespace
    instead of ``eval_dataloader``/``batch`` and gathered ``args`` instead
    of the per-step loss; the def name did not match the ``evaluate(args)``
    call below.

    NOTE(review): relies on the module-level ``model``, ``eval_dataloader``
    and ``accelerator`` prepared in the script body — confirm after the
    global renames are applied.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            # Language modeling: the batch serves as both inputs and labels.
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 55 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

# Module-level logger; restored the name `logger`, which the main() body reads.
logger = logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : List[str] = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_a : int = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_a : List[Any] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_a : List[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_a : Any = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_a : List[Any] = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_a : int = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : Any = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input training data file (a text file).'''} )
_a : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_a : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_a : Union[str, Any] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_a : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_a : List[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
_a : int = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_a : List[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def UpperCAmelCase__( self ) -> List[str]:
if self.train_file is not None:
lowercase__ : int = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowercase__ : Tuple = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : Tuple = 42
_a : Dict = True
_a : Union[str, Any] = None
_a : Optional[Any] = None
def __call__( self , lowerCamelCase__ ) -> Any:
lowercase__ : Any = """label""" if """label""" in features[0].keys() else """labels"""
lowercase__ : Dict = [feature.pop(lowerCamelCase__ ) for feature in features]
lowercase__ : Any = len(lowerCamelCase__ )
lowercase__ : Optional[Any] = len(features[0]["""input_ids"""] )
lowercase__ : Optional[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCamelCase__ )] for feature in features
]
lowercase__ : List[str] = list(chain(*lowerCamelCase__ ) )
lowercase__ : List[str] = self.tokenizer.pad(
lowerCamelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
lowercase__ : List[Any] = {k: v.view(lowerCamelCase__ , lowerCamelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
lowercase__ : str = torch.tensor(lowerCamelCase__ , dtype=torch.intaa )
return batch
def _lowerCamelCase ( ):
lowercase__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , a_ , a_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase__ : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowercase__ : Tuple = {}
if data_args.train_file is not None:
lowercase__ : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
lowercase__ : Any = data_args.validation_file
lowercase__ : Optional[Any] = data_args.train_file.split(""".""" )[-1]
lowercase__ : str = load_dataset(
a_ , data_files=a_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowercase__ : Optional[int] = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Any = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowercase__ : Dict = [f'''ending{i}''' for i in range(4 )]
lowercase__ : int = """sent1"""
lowercase__ : List[Any] = """sent2"""
if data_args.max_seq_length is None:
lowercase__ : int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
lowercase__ : Optional[int] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
lowercase__ : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase__ : Dict ):
lowercase__ : Dict = [[context] * 4 for context in examples[context_name]]
lowercase__ : Optional[int] = examples[question_header_name]
lowercase__ : Union[str, Any] = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(a_ )
]
# Flatten out
lowercase__ : Optional[int] = list(chain(*a_ ) )
lowercase__ : Union[str, Any] = list(chain(*a_ ) )
# Tokenize
lowercase__ : int = tokenizer(
a_ , a_ , truncation=a_ , max_length=a_ , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(a_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
lowercase__ : Dict = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
lowercase__ : Optional[Any] = min(len(a_ ) , data_args.max_train_samples )
lowercase__ : str = train_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
lowercase__ : str = train_dataset.map(
a_ , batched=a_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
lowercase__ : Optional[Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
lowercase__ : Optional[int] = min(len(a_ ) , data_args.max_eval_samples )
lowercase__ : List[str] = eval_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
lowercase__ : Any = eval_dataset.map(
a_ , batched=a_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowercase__ : Optional[Any] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=a_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase__ : List[str] ):
lowercase__ , lowercase__ : List[str] = eval_predictions
lowercase__ : int = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowercase__ : Tuple = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=a_ , data_collator=a_ , compute_metrics=a_ , )
# Training
if training_args.do_train:
lowercase__ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : Dict = last_checkpoint
lowercase__ : Tuple = trainer.train(resume_from_checkpoint=a_ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowercase__ : int = train_result.metrics
lowercase__ : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
lowercase__ : List[Any] = min(a_ , len(a_ ) )
trainer.log_metrics("""train""" , a_ )
trainer.save_metrics("""train""" , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase__ : int = trainer.evaluate()
lowercase__ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
lowercase__ : List[str] = min(a_ , len(a_ ) )
trainer.log_metrics("""eval""" , a_ )
trainer.save_metrics("""eval""" , a_ )
lowercase__ : Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def _lowerCamelCase ( lowerCamelCase__ : Any ):
    # TPU multiprocessing entry point: the spawner (xla_spawn) passes a
    # process-index argument, which is unused here — simply delegate to main().
    main()

# Standard script entry point.
if __name__ == "__main__":
    main()
| 200 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer tests for LayoutLM (slow and fast implementations).

    NOTE(review): in the original text every class attribute was bound to one
    mangled name and every method shared one mangled name, so only the last
    binding of each would have survived; names below are restored from the
    TokenizerTesterMixin contract — verify against the upstream test file.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a minimal WordPiece vocabulary file for the tests to load."""
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join(x + "\n" for x in vocab_tokens))

    def get_tokenizer(self, **kwargs):
        """Instantiate a slow tokenizer backed by the temp vocabulary."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, normalized) text pair used by the common tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token→id conversion against the hand-written vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """Placeholder kept from upstream; intentionally empty."""
        pass
| 55 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple:
    """Align a target (height, width) with the movq latent grid.

    Each side is rounded up to a multiple of ``scale_factor**2`` and then
    divided by ``scale_factor`` — i.e. ``ceil(side / scale_factor**2) *
    scale_factor`` per side.

    The original text had all three parameters mangled to one name (a syntax
    error) and bound the quotients to a mangled local while incrementing
    ``new_height``/``new_width``; the bindings are restored here. The function
    name matches its call site inside the pipeline's ``__call__``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when the side is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w: int = 512, h: int = 512):
    """Resize a PIL image and convert it to a normalized torch tensor.

    Returns a float tensor of shape (1, 3, h, w) with values in [-1, 1].

    The original text had duplicate mangled parameter names and lost the
    ``arr``/``image`` bindings (and ``np.floataa`` for ``np.float32``); they
    are restored here. The function name matches its call site inside the
    pipeline's ``__call__``.
    """
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    # Map uint8 [0, 255] to float [-1, 1].
    arr = arr.astype(np.float32) / 127.5 - 1
    # HWC -> CHW, then add a batch dimension.
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Kandinsky 2.2 image-to-image decoder pipeline (unet + DDPM scheduler + MoVQ autoencoder).

    NOTE(review): local assignment targets in this block look machine-mangled —
    many values are bound to `snake_case_` but read back under other names
    (`timesteps`, `latents`, `init_latents`, `hook`, ...), and the repeated
    `lowerCamelCase` defs shadow each other while `__call__` invokes
    `self.get_timesteps` / `self.prepare_latents`. Verify against the upstream
    diffusers `KandinskyV22Img2ImgPipeline` before relying on this text.
    """
    def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Union[str, Any]:
        """Register the unet, scheduler and movq sub-modules on the pipeline."""
        super().__init__()
        self.register_modules(
            unet=__magic_name__ , scheduler=__magic_name__ , movq=__magic_name__ , )
        # Spatial downscaling factor of the MoVQ autoencoder: 2^(levels - 1).
        snake_case_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
        """Trim the schedule for img2img: keep only the last `strength` fraction of steps."""
        snake_case_ : Optional[int] = min(int(num_inference_steps * strength ) , __magic_name__ )
        snake_case_ : Tuple = max(num_inference_steps - init_timestep , 0 )
        snake_case_ : Optional[int] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> Optional[int]:
        """Encode `image` into movq latents (unless already 4-channel latents) and
        noise them to the starting timestep via the scheduler."""
        if not isinstance(__magic_name__ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__magic_name__ )}''' )
        snake_case_ : Dict = image.to(device=__magic_name__ , dtype=__magic_name__ )
        snake_case_ : int = batch_size * num_images_per_prompt
        # A 4-channel input is assumed to already be movq latents — TODO confirm.
        if image.shape[1] == 4:
            snake_case_ : List[str] = image
        else:
            if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != batch_size:
                raise ValueError(
                    F'''You have passed a list of generators of length {len(__magic_name__ )}, but requested an effective batch'''
                    F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
            elif isinstance(__magic_name__ , __magic_name__ ):
                # One generator per batch element: encode each slice separately.
                snake_case_ : Optional[int] = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__magic_name__ )
                ]
                snake_case_ : Tuple = torch.cat(__magic_name__ , dim=0 )
            else:
                snake_case_ : int = self.movq.encode(__magic_name__ ).latent_dist.sample(__magic_name__ )
            snake_case_ : List[str] = self.movq.config.scaling_factor * init_latents
            snake_case_ : Optional[int] = torch.cat([init_latents] , dim=0 )
        snake_case_ : Union[str, Any] = init_latents.shape
        snake_case_ : Tuple = randn_tensor(__magic_name__ , generator=__magic_name__ , device=__magic_name__ , dtype=__magic_name__ )
        # get latents
        snake_case_ : str = self.scheduler.add_noise(__magic_name__ , __magic_name__ , __magic_name__ )
        snake_case_ : Any = init_latents
        return latents
    def lowerCamelCase (self , __magic_name__=0 ) -> Optional[int]:
        """Offload sub-modules to CPU with accelerate's sequential `cpu_offload`."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        snake_case_ : Optional[Any] = torch.device(F'''cuda:{gpu_id}''' )
        snake_case_ : Union[str, Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__magic_name__ , __magic_name__ )
    def lowerCamelCase (self , __magic_name__=0 ) -> Optional[int]:
        """Model-level CPU offload via accelerate >= 0.17 `cpu_offload_with_hook`."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        snake_case_ : int = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=__magic_name__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        snake_case_ : List[Any] = None
        # Chain the offload hooks so each model is loaded only when needed.
        for cpu_offloaded_model in [self.unet, self.movq]:
            snake_case_ , snake_case_ : Optional[int] = cpu_offload_with_hook(__magic_name__ , __magic_name__ , prev_module_hook=__magic_name__ )
        # We'll offload the last model manually.
        snake_case_ : Dict = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCamelCase (self ) -> Tuple:
        """Return the device the unet actually executes on (accounts for offload hooks)."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__magic_name__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(__magic_name__ )
    def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 512 , __magic_name__ = 512 , __magic_name__ = 100 , __magic_name__ = 4.0 , __magic_name__ = 0.3 , __magic_name__ = 1 , __magic_name__ = None , __magic_name__ = "pil" , __magic_name__ = True , ) -> Tuple:
        """Run image-to-image generation: encode the init image, noise it to the
        starting step, denoise with (optional) classifier-free guidance, then
        decode through movq and post-process to `output_type`."""
        snake_case_ : str = self._execution_device
        # Classifier-free guidance needs a doubled (uncond + cond) batch.
        snake_case_ : int = guidance_scale > 1.0
        if isinstance(__magic_name__ , __magic_name__ ):
            snake_case_ : Dict = torch.cat(__magic_name__ , dim=0 )
        snake_case_ : Optional[Any] = image_embeds.shape[0]
        if isinstance(__magic_name__ , __magic_name__ ):
            snake_case_ : str = torch.cat(__magic_name__ , dim=0 )
        if do_classifier_free_guidance:
            snake_case_ : Tuple = image_embeds.repeat_interleave(__magic_name__ , dim=0 )
            snake_case_ : str = negative_image_embeds.repeat_interleave(__magic_name__ , dim=0 )
            snake_case_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__magic_name__ )
        if not isinstance(__magic_name__ , __magic_name__ ):
            snake_case_ : str = [image]
        if not all(isinstance(__magic_name__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F'''Input is in incorrect format: {[type(__magic_name__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
        snake_case_ : List[str] = torch.cat([prepare_image(__magic_name__ , __magic_name__ , __magic_name__ ) for i in image] , dim=0 )
        snake_case_ : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=__magic_name__ )
        snake_case_ : Tuple = self.movq.encode(__magic_name__ )['''latents''']
        snake_case_ : List[str] = latents.repeat_interleave(__magic_name__ , dim=0 )
        self.scheduler.set_timesteps(__magic_name__ , device=__magic_name__ )
        snake_case_ , snake_case_ : Union[str, Any] = self.get_timesteps(__magic_name__ , __magic_name__ , __magic_name__ )
        snake_case_ : List[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        snake_case_ , snake_case_ : Optional[int] = downscale_height_and_width(__magic_name__ , __magic_name__ , self.movq_scale_factor )
        snake_case_ : Tuple = self.prepare_latents(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , image_embeds.dtype , __magic_name__ , __magic_name__ )
        for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case_ : int = {'''image_embeds''': image_embeds}
            snake_case_ : Union[str, Any] = self.unet(
                sample=__magic_name__ , timestep=__magic_name__ , encoder_hidden_states=__magic_name__ , added_cond_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
            if do_classifier_free_guidance:
                # The unet predicts noise and (learned) variance concatenated on dim 1.
                snake_case_ , snake_case_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case_ , snake_case_ : List[str] = noise_pred.chunk(2 )
                snake_case_ , snake_case_ : str = variance_pred.chunk(2 )
                snake_case_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case_ : List[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the variance channel — drop it.
                snake_case_ , snake_case_ : str = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case_ : Union[str, Any] = self.scheduler.step(
                __magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ , )[0]
        # post-processing
        snake_case_ : Any = self.movq.decode(__magic_name__ , force_not_quantize=__magic_name__ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map decoder output from [-1, 1] to [0, 1] and to NHWC numpy.
            snake_case_ : Optional[int] = image * 0.5 + 0.5
            snake_case_ : List[Any] = image.clamp(0 , 1 )
            snake_case_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            snake_case_ : Optional[Any] = self.numpy_to_pil(__magic_name__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__magic_name__ )
| 60 |
# Precomputed fifth powers of the decimal digits, keyed by the digit character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum all numbers equal to the sum of the fifth powers
    of their digits.

    The search starts at 1000 (single-digit trivial cases excluded) and stops
    below 1_000_000, since 7 * 9**5 = 413_343 < 10**6 means no seven-digit
    number can reach its own digit-power sum.
    """
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 55 | 0 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
# Twitter API credentials (fill in before running).
# NOTE(review): the original text bound all four to one mangled name; the
# conventional names used by tweepy's OAuth flow are restored here.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's available timeline (Twitter caps it at ~3200 tweets)
    and write it to ``new_<screen_name>_tweets.csv``.

    Pages backwards through the timeline using ``max_id`` until a request
    returns no tweets.
    """
    # Authorize and build the API client.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=2_0_0)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=2_0_0, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets('''FirePing32''')
| 642 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Config tester for MobileNetV1: checks MobileNet-specific fields exist.

    NOTE(review): local names look machine-mangled — the config instance is
    bound to `__A` but read back as `A`; verify against the upstream test file.
    """
    def UpperCamelCase_ ( self : Any ):
        # Build a config from the tester's inputs and assert the
        # MobileNet-specific attributes are present on it.
        __A = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(A ,"tf_padding" ) )
        self.parent.assertTrue(hasattr(A ,"depth_multiplier" ) )
class UpperCAmelCase :
    """Fabricates MobileNetV1 configs/inputs and runs output-shape assertions.

    NOTE(review): names in this block look machine-mangled — every parameter
    is bound as `A` and every local as `__A`, while the bodies read the
    conventional names (`parent`, `batch_size`, `config`, ...); verify
    against the upstream test file.
    """
    def __init__( self : Optional[Any] ,A : int ,A : List[Any]=13 ,A : int=3 ,A : Optional[Any]=32 ,A : Union[str, Any]=0.25 ,A : Tuple=8 ,A : Optional[int]=True ,A : Union[str, Any]=10_24 ,A : Any=32 ,A : Optional[int]="relu6" ,A : int=0.1 ,A : Optional[Any]=0.02 ,A : Optional[Any]=True ,A : List[str]=True ,A : str=10 ,A : str=None ,):
        # Store the test hyper-parameters on the tester instance.
        __A = parent
        __A = batch_size
        __A = num_channels
        __A = image_size
        __A = depth_multiplier
        __A = min_depth
        __A = tf_padding
        # The final hidden size scales with the depth multiplier.
        __A = int(last_hidden_size * depth_multiplier )
        __A = output_stride
        __A = hidden_act
        __A = classifier_dropout_prob
        __A = use_labels
        __A = is_training
        __A = num_labels
        __A = initializer_range
        __A = scope
    def UpperCamelCase_ ( self : Optional[int] ):
        """Create random pixel values, optional labels, and a config."""
        __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __A = None
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] ,self.num_labels )
            __A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        __A = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def UpperCamelCase_ ( self : Any ):
        """Build a MobileNetV1 config from the stored hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
    def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Tuple ,A : Optional[int] ,A : List[str] ):
        """Run the base model and check the last hidden state shape."""
        __A = MobileNetVaModel(config=A )
        model.to(A )
        model.eval()
        __A = model(A )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)
    def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : List[Any] ,A : int ,A : Union[str, Any] ):
        """Run the image-classification head and check the logits shape."""
        __A = self.num_labels
        __A = MobileNetVaForImageClassification(A )
        model.to(A )
        model.eval()
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def UpperCamelCase_ ( self : Tuple ):
        """Return (config, inputs_dict) for the common test harness."""
        __A = self.prepare_config_and_inputs()
        __A , __A , __A , __A = config_and_inputs
        __A = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common model tests for MobileNetV1 (base model and classification head).

    NOTE(review): local names appear machine-mangled (`__A` targets read back
    as `model_tester`, `config`, ...); verify against the upstream test file.
    """
    snake_case_ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    snake_case_ = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    def UpperCamelCase_ ( self : Any ):
        # Set up the model tester and the (text-free) config tester.
        __A = MobileNetVaModelTester(self )
        __A = MobileNetVaConfigTester(self ,config_class=A ,has_text_modality=A )
    def UpperCamelCase_ ( self : str ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass
    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def UpperCamelCase_ ( self : Tuple ):
        pass
    @unittest.skip(reason="MobileNetV1 does not output attentions" )
    def UpperCamelCase_ ( self : Any ):
        pass
    def UpperCamelCase_ ( self : Optional[int] ):
        """The forward signature's first positional argument must be `pixel_values`."""
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            __A = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __A = [*signature.parameters.keys()]
            __A = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,A )
    def UpperCamelCase_ ( self : List[Any] ):
        """Shape-check the base model outputs."""
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : Optional[int] ):
        """Hidden-state count check (expects 26 feature maps)."""
        def check_hidden_states_output(A : List[Any] ,A : List[Any] ,A : Optional[int] ):
            __A = model_class(A )
            model.to(A )
            model.eval()
            with torch.no_grad():
                __A = model(**self._prepare_for_class(A ,A ) )
            __A = outputs.hidden_states
            # Expected number of hidden states for MobileNetV1.
            __A = 26
            self.assertEqual(len(A ) ,A )
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = True
            check_hidden_states_output(A ,A ,A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __A = True
            check_hidden_states_output(A ,A ,A )
    def UpperCamelCase_ ( self : Tuple ):
        """Shape-check the image-classification head."""
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )
    @slow
    def UpperCamelCase_ ( self : Union[str, Any] ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = MobileNetVaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def prepare_img() -> "Image.Image":
    """Load the COCO cats fixture image used by the integration test below.

    The original text annotated the return type as `str` although the function
    returns the opened PIL image, and bound the image to a mangled name while
    returning `image`; both are fixed here. The function name matches its call
    site in the integration test class.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test: pretrained MobileNetV1 logits on a fixture image.

    NOTE(review): local names look machine-mangled (`__A` targets read back as
    `model`, `outputs`, ...); verify against the upstream test file.
    """
    @cached_property
    def UpperCamelCase_ ( self : List[str] ):
        # The image processor requires the vision extras to be installed.
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
        )
    @slow
    def UpperCamelCase_ ( self : Optional[Any] ):
        """Compare head logits with reference values (1001 classes: ImageNet + background)."""
        __A = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(A )
        __A = self.default_image_processor
        __A = prepare_img()
        __A = image_processor(images=A ,return_tensors="pt" ).to(A )
        # forward pass
        with torch.no_grad():
            __A = model(**A )
        # verify the logits
        __A = torch.Size((1, 10_01) )
        self.assertEqual(outputs.logits.shape ,A )
        __A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_A : str = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: wrap a benchmark callable to run either eagerly or as
    a (optionally XLA-compiled) `tf.function`.

    Eager mode with XLA is rejected, since XLA compilation requires graph mode.

    The original text had duplicate mangled parameter names and lost the
    bindings for `func`/`do_eager_mode`/`use_xla`; they are restored here. The
    name matches its decorator usage later in the file.
    """
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """Build a (batch_size, sequence_length) int32 tensor of random token ids
    in [0, vocab_size).

    The original text lost the `rng`/`values` bindings and used the mangled
    dtype `tf.intaa` (restored to `tf.int32`); the name matches its call sites
    in the benchmark class below.
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for _ in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class a__ ( __SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = """TensorFlow"""
@property
def __magic_name__ ( self ):
return tf.__version__
def __magic_name__ ( self , _a , _a , _a ):
# initialize GPU on separate process
lowercase : Optional[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase : Dict = self._prepare_inference_func(_a , _a , _a )
return self._measure_speed(_inference )
def __magic_name__ ( self , _a , _a , _a ):
lowercase : Optional[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase : List[str] = self._prepare_train_func(_a , _a , _a )
return self._measure_speed(_train )
def __magic_name__ ( self , _a , _a , _a ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _a )
lowercase : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase : List[Any] = self._prepare_inference_func(_a , _a , _a )
return self._measure_memory(_inference )
def __magic_name__ ( self , _a , _a , _a ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _a )
lowercase : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase : Optional[int] = self._prepare_train_func(_a , _a , _a )
return self._measure_memory(_train )
def __magic_name__ ( self , _a , _a , _a ):
lowercase : Dict = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowercase : Optional[Any] = (
hasattr(_a , "architectures" )
and isinstance(config.architectures , _a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase : int = __import__("transformers" , fromlist=[model_class] )
lowercase : Dict = getattr(_a , _a )
lowercase : Optional[Any] = model_cls(_a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowercase : str = TF_MODEL_MAPPING[config.__class__](_a )
# encoder-decoder has vocab size saved differently
lowercase : List[str] = config.vocab_size if hasattr(_a , "vocab_size" ) else config.encoder.vocab_size
lowercase : Optional[int] = random_input_ids(_a , _a , _a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_a , decoder_input_ids=_a , training=_a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_a , training=_a )
lowercase : Union[str, Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __magic_name__ ( self , _a , _a , _a ):
lowercase : Union[str, Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowercase : List[Any] = (
hasattr(_a , "architectures" )
and isinstance(config.architectures , _a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase : Optional[int] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase : Union[str, Any] = __import__("transformers" , fromlist=[model_class] )
lowercase : Optional[Any] = getattr(_a , _a )
lowercase : Tuple = model_cls(_a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowercase : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_a )
# encoder-decoder has vocab size saved differently
lowercase : Union[str, Any] = config.vocab_size if hasattr(_a , "vocab_size" ) else config.encoder.vocab_size
lowercase : Optional[Any] = random_input_ids(_a , _a , _a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowercase : Optional[int] = model(_a , decoder_input_ids=_a , labels=_a , training=_a )[0]
lowercase : Dict = tf.gradients(_a , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowercase : Any = model(_a , labels=_a , training=_a )[0]
lowercase : List[str] = tf.gradients(_a , model.trainable_variables )
return gradients
lowercase : Union[str, Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __magic_name__ ( self , _a ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(_a , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowercase : Optional[Any] = timeit.repeat(
_a , repeat=self.args.repeat , number=10 , )
return min(_a ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn\'t fit on GPU. {e}""" )
def __magic_name__ ( self , _a ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
lowercase : Union[str, Any] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
lowercase : Tuple = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
lowercase : Dict = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowercase : List[Any] = nvml.nvmlDeviceGetMemoryInfo(_a )
lowercase : List[Any] = meminfo.used
lowercase : str = Memory(_a )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
lowercase : int = None
else:
lowercase : List[Any] = measure_peak_memory_cpu(_a )
lowercase : Optional[Any] = Memory(_a ) if isinstance(_a , _a ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowercase : List[Any] = stop_memory_tracing(_a )
if memory is None:
lowercase : Tuple = summary.total
else:
lowercase : Dict = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn\'t fit on GPU. {e}""" )
return "N/A", None
# | 361 |  (non-code artifact left over from dataset concatenation)
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase :
    '''simple docstring'''

    # NOTE(review): machine-obfuscated LayoutLMv3-style model tester. The
    # __init__ signature below repeats the parameter name `A` (duplicate
    # parameter names are a SyntaxError) and most locals were collapsed into
    # `__A`, so names such as `parent`, `bbox`, `config_and_inputs` are
    # unresolved as written. Code left byte-identical; comments only —
    # restoring the original names needs a deliberate, reviewed fix.
    def __init__( self : str ,A : int ,A : int=2 ,A : Optional[Any]=3 ,A : Dict=4 ,A : Optional[int]=2 ,A : Union[str, Any]=7 ,A : List[str]=True ,A : Union[str, Any]=True ,A : Optional[int]=True ,A : Optional[int]=True ,A : Tuple=99 ,A : Optional[int]=36 ,A : Dict=3 ,A : str=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : Dict=0.1 ,A : Union[str, Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Any=16 ,A : Union[str, Any]=2 ,A : List[Any]=0.02 ,A : List[Any]=6 ,A : Optional[int]=6 ,A : List[Any]=3 ,A : Union[str, Any]=4 ,A : Tuple=None ,A : List[str]=10_00 ,):
        # Positional defaults suggest the original names were (parent,
        # batch_size=2, num_channels=3, image_size=4, patch_size=2,
        # text_seq_length=7, ...) — TODO confirm against upstream.
        __A = parent
        __A = batch_size
        __A = num_channels
        __A = image_size
        __A = patch_size
        __A = text_seq_length
        __A = is_training
        __A = use_input_mask
        __A = use_token_type_ids
        __A = use_labels
        __A = vocab_size
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = max_position_embeddings
        __A = type_vocab_size
        __A = type_sequence_label_size
        __A = initializer_range
        __A = coordinate_size
        __A = shape_size
        __A = num_labels
        __A = num_choices
        __A = scope
        __A = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __A = text_seq_length
        __A = (image_size // patch_size) ** 2 + 1
        __A = self.text_seq_length + self.image_seq_length

    # Builds a config plus random tensors (input ids, legal bounding boxes,
    # pixel values, masks, labels) used by the model checks below.
    def UpperCamelCase_ ( self : int ):
        __A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )

        __A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __A = bbox[i, j, 3]
                    __A = bbox[i, j, 1]
                    __A = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __A = bbox[i, j, 2]
                    __A = bbox[i, j, 0]
                    __A = t

        __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __A = None
        if self.use_input_mask:
            __A = random_attention_mask([self.batch_size, self.text_seq_length] )

        __A = None
        if self.use_token_type_ids:
            __A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )

        __A = None
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )

        __A = LayoutLMvaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    # Runs the base model with all supported input combinations
    # (text + image, text only, image only) and checks output shapes.
    def UpperCamelCase_ ( self : Optional[int] ,A : List[str] ,A : Any ,A : Dict ,A : List[Any] ,A : Optional[int] ,A : Any ,A : Dict ,A : List[Any] ):
        __A = LayoutLMvaModel(config=A )
        model.to(A )
        model.eval()

        # text + image
        __A = model(A ,pixel_values=A )
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A )
        __A = model(A ,bbox=A ,pixel_values=A ,token_type_ids=A )
        __A = model(A ,bbox=A ,pixel_values=A )

        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        __A = model(A )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        __A = model(pixel_values=A )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )

    # Sequence-classification head: expects logits of shape (batch, num_labels).
    def UpperCamelCase_ ( self : Optional[int] ,A : Dict ,A : List[str] ,A : Any ,A : List[Any] ,A : Any ,A : Any ,A : Dict ,A : Optional[Any] ):
        __A = self.num_labels
        __A = LayoutLMvaForSequenceClassification(A )
        model.to(A )
        model.eval()
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    # Token-classification head: logits (batch, text_seq_length, num_labels).
    def UpperCamelCase_ ( self : str ,A : Optional[Any] ,A : Dict ,A : str ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ,A : Any ,A : Union[str, Any] ):
        __A = self.num_labels
        __A = LayoutLMvaForTokenClassification(config=A )
        model.to(A )
        model.eval()
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )

    # Extractive-QA head: start/end logits over the full (text+image) sequence.
    def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : int ,A : str ,A : List[str] ,A : int ,A : List[str] ,A : List[str] ,A : Dict ):
        __A = LayoutLMvaForQuestionAnswering(config=A )
        model.to(A )
        model.eval()
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )

    # Repackages prepare_config_and_inputs() into the kwargs dict consumed by
    # the common test mixin.
    def UpperCamelCase_ ( self : str ):
        __A = self.prepare_config_and_inputs()
        (
            (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) ,
        ) = config_and_inputs
        __A = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): obfuscated common-test class for LayoutLMv3. The base-class
    # placeholder `__SCREAMING_SNAKE_CASE` (presumably ModelTesterMixin /
    # PipelineTesterMixin — verify against the imports above) is unresolved,
    # the repeated `snake_case_` class attributes overwrite each other, and
    # several `__A = ...` assignments should have been `self.model_tester` /
    # `self.config_tester`. Code left byte-identical; comments only.
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    # Pipeline-compatibility hook: always skips (see the note in the body).
    def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Tuple ,A : List[Any] ,A : Optional[Any] ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    # setUp: builds the model tester and the config tester.
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = LayoutLMvaModelTester(self )
        __A = ConfigTester(self ,config_class=A ,hidden_size=37 )

    # Expands inputs for multiple-choice heads and zero-fills labels for the
    # QA / sequence- / token-classification heads when requested.
    def UpperCamelCase_ ( self : List[Any] ,A : int ,A : List[str] ,A : Dict=False ):
        __A = copy.deepcopy(A )
        if model_class in get_values(A ):
            __A = {
                k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
                if isinstance(A ,torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(A ):
                __A = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in get_values(A ):
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                __A = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A ,)
        return inputs_dict

    def UpperCamelCase_ ( self : List[Any] ):
        self.config_tester.run_common_tests()

    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    # Re-runs the base-model checks for every position-embedding type.
    def UpperCamelCase_ ( self : str ):
        __A = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __A = type
            self.model_tester.create_and_check_model(*A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A )

    def UpperCamelCase_ ( self : str ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A )

    # Smoke-test loading the first published checkpoint.
    @slow
    def UpperCamelCase_ ( self : Optional[int] ):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = LayoutLMvaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def UpperCAmelCase ( ):
    """Load the standard COCO cats fixture image used by the integration test.

    Returns a PIL Image. Fixes vs. the obfuscated original: the opened image
    was stored in `__A` but the undefined name `image` was returned
    (NameError), and the return annotation wrongly claimed `Dict`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    # Integration test: runs the pretrained microsoft/layoutlmv3-base
    # checkpoint on a fixture image and compares hidden states against
    # recorded reference values.
    #
    # Fixes vs. the obfuscated original:
    #  * both methods were named `UpperCamelCase_`, so the property accessed
    #    via `self.default_image_processor` never existed — the property is
    #    renamed to match its call site;
    #  * locals were clobbered into `__A`, leaving `outputs`, `input_ids`,
    #    etc. undefined; distinct names restore the dataflow;
    #  * `apply_ocr=A` / `.to(A)` referenced an undefined `A`; the intent is
    #    `apply_ocr=False` and moving tensors to `torch_device`.
    # NOTE(review): `prepare_img` below assumes a module-level fixture loader
    # under that name — the obfuscation renamed its definition; confirm.
    @cached_property
    def default_image_processor( self : Any ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self : Dict ):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image ,return_tensors="pt" ).pixel_values.to(torch_device )

        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) ,bbox=bbox.to(torch_device ) ,pixel_values=pixel_values.to(torch_device ) ,)

        # verify the logits
        expected_shape = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )

        expected_slice = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(torch_device )

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,expected_slice ,atol=1E-4 ) )
# | 55 | 0 |  (non-code artifact left over from dataset concatenation)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __snake_case ( unittest.TestCase ):
    """Configuration holder for the PoolFormer image-processor tests.

    Fixes vs. the obfuscated original: the __init__ signature repeated the
    parameter name ``snake_case`` for every argument (a SyntaxError) and all
    locals were collapsed into ``lowercase`` so no attribute was ever set.
    The parameter names below are recovered from the body's right-hand sides
    and the positional defaults; the list defaults use the None-sentinel
    idiom so no mutable default is shared between instances.
    """

    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,min_resolution=30 ,max_resolution=400 ,do_resize_and_center_crop=True ,size=None ,crop_pct=0.9 ,crop_size=None ,do_normalize=True ,image_mean=None ,image_std=None ,):
        size = size if size is not None else {"""shortest_edge""": 30}
        crop_size = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def _SCREAMING_SNAKE_CASE ( self ):
        """Return the kwargs dict used to build a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # NOTE(review): obfuscated image-processing test class. The mixin
    # placeholder `__SCREAMING_SNAKE_CASE` (presumably
    # ImageProcessingSavingTestMixin — see the imports above) is unresolved,
    # `PoolFormerImageProcessingTester` is not defined under that name in this
    # module, and the `lowercase : ... =` assignments should have been
    # `self.image_processor_tester = ...` / `image_processing = ...` etc.
    # Code left byte-identical; comments only.
    _a : Any= PoolFormerImageProcessor if is_vision_available() else None

    # setUp: builds the tester that supplies the processor kwargs.
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : int = PoolFormerImageProcessingTester(self )

    @property
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    # Checks the processor exposes all expected configuration attributes.
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(snake_case ,"""do_resize_and_center_crop""" ) )
        self.assertTrue(hasattr(snake_case ,"""size""" ) )
        self.assertTrue(hasattr(snake_case ,"""crop_pct""" ) )
        self.assertTrue(hasattr(snake_case ,"""do_normalize""" ) )
        self.assertTrue(hasattr(snake_case ,"""image_mean""" ) )
        self.assertTrue(hasattr(snake_case ,"""image_std""" ) )

    # from_dict round-trip, with and without size/crop_size overrides.
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 30} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 30, """width""": 30} )

        lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )

    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        pass

    # PIL input: single image and batch both come out (N, C, crop_h, crop_w).
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case ,Image.Image )

        # Test not batched input
        lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowercase : Any = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    # numpy input: same shape expectations as the PIL case.
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case ,numpify=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case ,np.ndarray )

        # Test not batched input
        lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowercase : Any = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    # torch input: same shape expectations as the PIL case.
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case ,torchify=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case ,torch.Tensor )

        # Test not batched input
        lowercase : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowercase : Optional[int] = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
# | 336 |  (non-code artifact left over from dataset concatenation)
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any ,A : List[str] ,A : str=7 ,A : Optional[Any]=3 ,A : Any=18 ,A : int=30 ,A : int=4_00 ,A : List[str]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=True ,A : Tuple=None ,A : Tuple=True ,A : Union[str, Any]=[0.5, 0.5, 0.5] ,A : str=[0.5, 0.5, 0.5] ,A : List[Any]=False ,):
__A = size if size is not None else {"height": 20, "width": 20}
__A = crop_size if crop_size is not None else {"height": 18, "width": 18}
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size
__A = do_center_crop
__A = crop_size
__A = do_normalize
__A = image_mean
__A = image_std
__A = do_reduce_labels
def UpperCamelCase_ ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCAmelCase ( ):
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Fixes vs. the obfuscated original: the dataset and opened images were
    stored in `__A`, so `dataset`, `image` and `map` in the body/return were
    undefined (NameError); the bogus `-> int` annotation is also dropped.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image = Image.open(dataset[0]["file"] )
    seg_map = Image.open(dataset[1]["file"] )
    return image, seg_map
def UpperCAmelCase ( ):
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures
    for the batched tests.

    Fixes vs. the obfuscated original: all locals were stored in `__A`, so the
    returned names were undefined (NameError). The fixture layout alternates
    image/map files, hence the pairing below.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image_a = Image.open(ds[0]["file"] )
    map_a = Image.open(ds[1]["file"] )
    image_b = Image.open(ds[2]["file"] )
    map_b = Image.open(ds[3]["file"] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): obfuscated BEiT image-processing test class. The mixin
    # placeholder `__SCREAMING_SNAKE_CASE` (presumably
    # ImageProcessingSavingTestMixin — see the imports above) is unresolved,
    # `BeitImageProcessingTester` is not defined under that name in this
    # module, and the `__A = ...` assignments should have been
    # `self.image_processor_tester = ...` / `image_processing = ...` etc.
    # `A` in assertions is likewise an undefined placeholder. Code left
    # byte-identical; comments only.
    snake_case_ = BeitImageProcessor if is_vision_available() else None

    # setUp: builds the tester that supplies the processor kwargs.
    def UpperCamelCase_ ( self : List[Any] ):
        __A = BeitImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self : List[Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Checks the processor exposes all expected configuration attributes.
    def UpperCamelCase_ ( self : int ):
        __A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A ,"do_resize" ) )
        self.assertTrue(hasattr(A ,"size" ) )
        self.assertTrue(hasattr(A ,"do_center_crop" ) )
        self.assertTrue(hasattr(A ,"center_crop" ) )
        self.assertTrue(hasattr(A ,"do_normalize" ) )
        self.assertTrue(hasattr(A ,"image_mean" ) )
        self.assertTrue(hasattr(A ,"image_std" ) )

    # from_dict round-trip, with and without size/crop_size/reduce_labels
    # overrides.
    def UpperCamelCase_ ( self : List[str] ):
        __A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"height": 20, "width": 20} )
        self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
        self.assertEqual(image_processor.do_reduce_labels ,A )

        __A = self.image_processing_class.from_dict(
            self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=A )
        self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
        self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
        self.assertEqual(image_processor.do_reduce_labels ,A )

    def UpperCamelCase_ ( self : List[Any] ):
        pass

    # PIL input: single image and batch both come out (N, C, crop_h, crop_w).
    def UpperCamelCase_ ( self : Optional[int] ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A ,Image.Image )

        # Test not batched input
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)

        # Test batched
        __A = image_processing(A ,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)

    # numpy input: same shape expectations as the PIL case.
    def UpperCamelCase_ ( self : List[str] ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,np.ndarray )

        # Test not batched input
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)

        # Test batched
        __A = image_processing(A ,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)

    # torch input: same shape expectations as the PIL case.
    def UpperCamelCase_ ( self : int ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,torch.Tensor )

        # Test not batched input
        __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)

        # Test batched
        __A = image_processing(A ,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)

    # Segmentation maps: labels come back as int64 tensors in [0, 255] with
    # shape (N, crop_h, crop_w), for both tensor and PIL inputs.
    def UpperCamelCase_ ( self : str ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
        __A = []
        for image in image_inputs:
            self.assertIsInstance(A ,torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )

        # Test not batched input
        __A = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(
            encoding["labels"].shape ,(
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(encoding["labels"].dtype ,torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 2_55 )

        # Test batched
        __A = image_processing(A ,A ,return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(
            encoding["labels"].shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(encoding["labels"].dtype ,torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 2_55 )

        # Test not batched input (PIL images)
        __A , __A = prepare_semantic_single_inputs()
        __A = image_processing(A ,A ,return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(
            encoding["labels"].shape ,(
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(encoding["labels"].dtype ,torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 2_55 )

        # Test batched input (PIL images)
        __A , __A = prepare_semantic_batch_inputs()
        __A = image_processing(A ,A ,return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,(
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(
            encoding["labels"].shape ,(
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) ,)
        self.assertEqual(encoding["labels"].dtype ,torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 2_55 )

    # reduce_labels shifts the label range: ADE20k labels stay within
    # [0, 150] normally, and use 255 as the ignore index when reduced.
    def UpperCamelCase_ ( self : Dict ):
        # Initialize image_processing
        __A = self.image_processing_class(**self.image_processor_dict )

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        __A , __A = prepare_semantic_single_inputs()
        __A = image_processing(A ,A ,return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 1_50 )

        __A = True
        __A = image_processing(A ,A ,return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# | 55 | 0 |  (non-code artifact left over from dataset concatenation)
'''simple docstring'''
def _snake_case ( A_ : int ) -> int:
    """Return the sum of the decimal digits of ``A_`` (sign ignored).

    Fix: the obfuscated original read the undefined lowercase name ``a_``
    instead of the parameter ``A_`` (a NameError at runtime), and carried
    bogus local type annotations.
    """
    n = abs(A_ )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def _snake_case ( A_ : int ) -> int:
    """Recursively sum the decimal digits of ``A_`` (sign ignored).

    Fixes: ``abs`` was applied to the undefined name ``a_``, and the recursive
    call targeted ``sum_of_digits``, which does not exist in this module —
    the function now recurses on itself.
    """
    n = abs(A_ )
    return n if n < 10 else n % 10 + _snake_case(n // 10 )
def _snake_case ( A_ : int ) -> int:
    """Sum the decimal digits of ``A_`` via its string representation.

    Fix: the generator converted the undefined name ``a_`` instead of each
    character ``c`` of the string.
    """
    return sum(int(c ) for c in str(abs(A_ ) ) )
def _snake_case ( ):
    """Benchmark the digit-sum implementations with ``timeit``.

    NOTE(review): obfuscation broke this function — the inner helper repeats
    the parameter name ``A_`` (a SyntaxError), the locals ``call`` and
    ``timing`` are assigned to throwaway names, and the loop refers to
    ``sum_of_digits`` / ``sum_of_digits_recursion`` / ``sum_of_digits_compact``
    which do not exist under those names in this module. Left byte-identical;
    a real fix requires restoring the original function names file-wide.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(A_ : Optional[int] , A_ : Optional[Any] ) -> None:
        # Times one `func(value)` call via __main__ lookup and prints it.
        a_ : List[str] = f'''{func.__name__}({value})'''
        a_ : Tuple = timeit(f'''__main__.{call}''' , setup="""import __main__""" )
        print(f'''{call:56} = {func(a_ )} -- {timing:.4f} seconds''' )

    # Values span small, 2**50-scale, and 2**100-scale integers.
    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(a_ , a_ )
        print()
if __name__ == "__main__":
    # Run the module's doctests, then the timing comparison.
    import doctest

    doctest.testmod()
    # NOTE(review): `benchmark` is not defined in this module (the function was
    # renamed `_snake_case` by obfuscation), so this call raises NameError.
    benchmark()
# | 577 |  (non-code artifact left over from dataset concatenation)
from numpy import exp, pi, sqrt
def UpperCAmelCase ( x: float , mu: float = 0.0 , sigma: float = 1.0 ) -> float:
    """Evaluate the Gaussian (normal) probability density at ``x``.

    Fixes: the obfuscated signature repeated the parameter name ``a_`` three
    times (a SyntaxError), and the return annotation claimed ``int`` although
    the density is a float.

    >>> from math import isclose
    >>> isclose(UpperCAmelCase(0), 0.3989422804014327)
    True
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
# | 55 | 0 |  (non-code artifact left over from dataset concatenation)
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__snake_case : Optional[int] = 2
class Dictionary:
    """A fairseq-style mapping between symbols and consecutive integer indices.

    Restored from a mangled original in which every attribute assignment went
    to a throwaway name (so ``self.symbols``/``self.count``/``self.indices``
    were read but never set) and all helper methods shared the name ``A__``
    (later definitions shadowed earlier ones).  The conversion code below
    already uses ``Dictionary.load`` and ``.indices``, which this restores.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # index -> symbol
        self.count = []  # index -> occurrence count
        self.indices = {}  # symbol -> index
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range indices map to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Build a Dictionary from ``f`` (a path or open file) of
        ``<symbol> <count>`` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add ``word`` to the dictionary (or bump its count by ``n``) and
        return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        idx = len(self.symbols)
        self.indices[word] = idx
        self.symbols.append(word)
        self.count.append(n)
        return idx

    def _load_meta(self, lines):
        # Plain dict files carry no metadata header; start reading at line 0.
        return 0

    def add_from_file(self, f):
        """Load dictionary entries from ``f`` (a path or an open file object)."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """Rewrite fairseq BPE vocab keys for the HF tokenizer.

    ``"hel@@" -> "hel"`` (continuation marker stripped) and
    ``"lo" -> "lo</w>"`` (word-final marker appended); the four special
    tokens keep their original spelling and index.

    Restored from a mangled original that was named ``a_`` (the conversion
    code calls ``rewrite_dict_keys``) and referenced the undefined names
    ``d``/``da``/``keep_keys``.
    """
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint directory into a HF model directory.

    Expects ``checkpoint.pt``, ``dict.txt`` and ``bpecodes`` inside
    ``biogpt_checkpoint_path``; writes the vocab, merges, model config,
    tokenizer config and PyTorch weights into ``pytorch_dump_folder_path``.

    Restored from a mangled original in which the function was named ``a_``
    (the CLI below calls ``convert_biogpt_checkpoint_to_pytorch``) and every
    local variable was assigned to the throwaway name ``A__``.

    Raises:
        ValueError: if any of the expected input files is missing.
    """
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    # NOTE(review): the module-level constant "2" at the top of this file was
    # presumably the json indent; it is inlined here — confirm.
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # fairseq prefixes decoder weights with "decoder."; HF expects "biogpt."
    # except for the LM head projection.
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    # CLI entry point.  Restored: the mangled original assigned the parser
    # and the parsed args to throwaway names while still referencing
    # ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--biogpt_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
            ' bpecodes, etc.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 571 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test for the Flax Stable Diffusion inpainting pipeline.

    Restored from a mangled original: the tear-down and test methods were
    renamed to ``UpperCamelCase_`` (so unittest never ran them) and every
    local was assigned to ``__A`` while ``output_slice`` was referenced
    undefined.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        # NOTE(review): argument order follows the upstream diffusers test
        # (prompt_ids, mask, masked_image, params, rng, steps) — confirm
        # against the installed pipeline signature.
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 55 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the Swinv2 sub-package.  Restored: the mangled
# original assigned the structure dict and the modeling list to the throwaway
# name ``lowercase`` while later code referenced ``_import_structure``, and
# the ``_LazyModule`` instance was never installed into ``sys.modules``.
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    # Names below match the strings declared in ``_import_structure`` (the
    # mangled file imported from ``.configuration_swinva`` / ``SwinvaConfig``).
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 211 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a ``GLPNImageProcessor`` in tests.

    Renamed from the mangled ``UpperCAmelCase``: the test class below already
    instantiates ``GLPNImageProcessingTester(self)`` and calls
    ``prepare_image_processor_dict``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Standard image-processor test-suite for ``GLPNImageProcessor``.

    Restored from a mangled original: the mixin base was the undefined name
    ``__SCREAMING_SNAKE_CASE`` and the test methods were not named ``test_*``
    (so unittest never discovered them).
    """

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        # GLPNImageProcessor does not support batched input; the shared
        # batch-feature check is deliberately skipped.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 55 | 0 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    """Create a randomly initialized seq2seq model for ``config_name`` and save
    it (plus its tokenizer) under ``save_dir``.

    Renamed from the mangled ``A`` — ``fire.Fire`` below dispatches to
    ``save_randomly_initialized_version``.  Extra keyword args are forwarded
    to the config loader.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    # NOTE(review): the import spells this class ``AutoModelForSeqaSeqLM``;
    # transformers exports ``AutoModelForSeq2SeqLM`` — the import line looks
    # mangled, confirm.
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    # CLI entry point: fire maps command-line arguments onto the function.
    fire.Fire(save_randomly_initialized_version)
| 545 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """Task template mapping a dataset's columns onto the image-classification
    schema (``image`` input, ``labels`` ClassLabel output).

    Restored from a mangled original whose base class and ``frozen`` flag were
    the undefined name ``__SCREAMING_SNAKE_CASE``.
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        dataset's actual ``ClassLabel`` feature.

        Raises:
            ValueError: if the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Write through __dict__ because the dataclass is frozen.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 55 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge for 0-1 BFS; ``weight`` must be 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph as an adjacency list, supporting 0-1 BFS shortest paths.

    Restored from a mangled original: both classes were named ``__a`` (the
    code itself constructs ``Edge(...)``), the Edge fields were replaced by
    ``= 42`` placeholders, and the start vertex's distance was never set.
    """

    def __init__(self, size):
        self._graph = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        """Iterate over the outgoing edges of ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        """Add a directed edge; only weights 0 and 1 are allowed."""
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        """Return the 0-1 BFS shortest-path length between two vertices.

        Weight-0 edges are pushed to the front of the deque and weight-1
        edges to the back, giving Dijkstra-equivalent results in O(V + E).

        Raises:
            ValueError: if ``finish_vertex`` is unreachable.
        """
        queue = deque([start_vertex])
        distances = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if we already have an equal-or-better distance.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 650 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` (a non-negative int) is prime.

    Restored: the mangled original used undefined names and was named
    ``UpperCAmelCase`` like every other function in this module; siblings
    already call ``is_prime``.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Return all primes in [2, n] via a sieve-of-Eratosthenes variant
    (composites are zeroed in place, then filtered out)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n], testing each candidate with ``is_prime``."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factors of ``number`` (with multiplicity).

    0 and 1 map to [0] / [1]; a prime maps to [itself].
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # NOTE: true division keeps the original's behavior — the
                # quotient becomes a float after the first division.
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes whose sum is the even ``number`` (> 2).

    NOTE(review): like the upstream original, the inner loop starts at
    ``i + 1``, so pairs of equal primes (e.g. 4 = 2 + 2) are never found
    and fail the postcondition — preserved as-is.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative ints
    (iterative Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple of two positive ints, built from
    their prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared prime: take it with the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime (0-indexed: ``get_prime(0) == 2``)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return the primes strictly between two primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all divisors of ``n`` (including 1 and n), ascending."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return ``(numerator, denominator)`` reduced by their gcd."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for a non-negative int (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Return a Fibonacci number iteratively (fib(0) == fib(1) == 1,
    fib(2) == 2, ...), preserving the upstream indexing."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
| 55 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the MobileBERT sub-package.  Restored: the
# mangled original assigned the structure dict and each conditional list to
# throwaway ``UpperCAmelCase`` names while the final line referenced
# ``_import_structure``, and the ``_LazyModule`` was never installed into
# ``sys.modules``.
_import_structure = {
    'configuration_mobilebert': [
        'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileBertConfig',
        'MobileBertOnnxConfig',
    ],
    'tokenization_mobilebert': ['MobileBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilebert'] = [
        'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileBertForMaskedLM',
        'MobileBertForMultipleChoice',
        'MobileBertForNextSentencePrediction',
        'MobileBertForPreTraining',
        'MobileBertForQuestionAnswering',
        'MobileBertForSequenceClassification',
        'MobileBertForTokenClassification',
        'MobileBertLayer',
        'MobileBertModel',
        'MobileBertPreTrainedModel',
        'load_tf_weights_in_mobilebert',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
        'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileBertForMaskedLM',
        'TFMobileBertForMultipleChoice',
        'TFMobileBertForNextSentencePrediction',
        'TFMobileBertForPreTraining',
        'TFMobileBertForQuestionAnswering',
        'TFMobileBertForSequenceClassification',
        'TFMobileBertForTokenClassification',
        'TFMobileBertMainLayer',
        'TFMobileBertModel',
        'TFMobileBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 627 |
import os
def solution() -> int:
    """Project Euler 18/67: maximum top-to-bottom path sum through the
    triangle stored in ``triangle.txt`` next to this script.

    Restored: the function was named ``UpperCAmelCase`` although the
    ``__main__`` guard calls ``solution()``, and ``os.path.realpath`` was
    given the undefined name ``a_`` instead of ``__file__``.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    # Parse each line into a row of ints.
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Dynamic programming: each cell accumulates the best path sum from above
    # (out-of-range neighbours count as 0).
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| 55 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
    """Config/inputs builder ("model tester") for the YOLOS unit tests.

    NOTE(review): this chunk looks machine-renamed and is not runnable as-is:
    `__init__` repeats the parameter name `lowerCamelCase__` (a SyntaxError in
    Python), its body reads names such as `parent`/`batch_size` that are never
    bound, and every check method below is named `UpperCAmelCase__`, so each
    definition shadows the previous one and only the last survives on the
    class.  The test class below also instantiates `YolosModelTester`, a name
    that does not exist here.  The upstream method names were
    `prepare_config_and_inputs`, `get_config`, `create_and_check_model`,
    `create_and_check_for_object_detection` and
    `prepare_config_and_inputs_for_common` — confirm against the upstream
    YOLOS test file before relying on this helper.
    """

    # Stores the hyper-parameters of a tiny YOLOS model plus detection-specific
    # knobs (n_targets, num_detection_tokens) and precomputes the expected
    # transformer sequence length.
    def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=[30, 30] , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=10 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=None , lowerCamelCase__=8 , lowerCamelCase__=10 , ) -> str:
        lowercase__ : List[str] = parent
        lowercase__ : Optional[Any] = batch_size
        lowercase__ : Dict = image_size
        lowercase__ : int = patch_size
        lowercase__ : Optional[int] = num_channels
        lowercase__ : Any = is_training
        lowercase__ : Tuple = use_labels
        lowercase__ : List[str] = hidden_size
        lowercase__ : int = num_hidden_layers
        lowercase__ : Optional[Any] = num_attention_heads
        lowercase__ : List[Any] = intermediate_size
        lowercase__ : Any = hidden_act
        lowercase__ : List[Any] = hidden_dropout_prob
        lowercase__ : str = attention_probs_dropout_prob
        lowercase__ : List[Any] = type_sequence_label_size
        lowercase__ : List[Any] = initializer_range
        lowercase__ : Optional[int] = num_labels
        lowercase__ : Union[str, Any] = scope
        lowercase__ : List[str] = n_targets
        lowercase__ : Optional[int] = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens

    # Builds random pixel values plus (optionally) per-image DETR-style label
    # dicts with random class ids and boxes.
    def UpperCAmelCase__( self ) -> Dict:
        lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        lowercase__ : Union[str, Any] = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            lowercase__ : str = []
            for i in range(self.batch_size ):
                lowercase__ : Union[str, Any] = {}
                lowercase__ : Tuple = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=lowerCamelCase__ )
                lowercase__ : Tuple = torch.rand(self.n_targets , 4 , device=lowerCamelCase__ )
                labels.append(lowerCamelCase__ )
        lowercase__ : int = self.get_config()
        return config, pixel_values, labels

    # Returns a YolosConfig built from the stored hyper-parameters.
    def UpperCAmelCase__( self ) -> Union[str, Any]:
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )

    # Checks the base model's last_hidden_state shape.
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
        lowercase__ : Dict = YolosModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        lowercase__ : Optional[int] = model(lowerCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )

    # Checks the object-detection head's logits/pred_boxes shapes with and
    # without labels (the labelled call also produces a scalar loss).
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
        lowercase__ : Optional[int] = YolosForObjectDetection(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        lowercase__ : Union[str, Any] = model(pixel_values=lowerCamelCase__ )
        lowercase__ : Dict = model(lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        lowercase__ : str = model(pixel_values=lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )

    # Adapter for the common tests: config plus {"pixel_values": ...} dict.
    def UpperCAmelCase__( self ) -> Union[str, Any]:
        lowercase__ : str = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs
        lowercase__ : str = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Unit-test suite for the YOLOS model and its object-detection head.

    NOTE(review): machine-renamed and not runnable as-is — the base classes
    `__SCREAMING_SNAKE_CASE` are undefined (upstream: `ModelTesterMixin` and
    `PipelineTesterMixin`), the class attributes all share the name `_a`
    (shadowing each other), every test method is named `UpperCAmelCase__`
    (only the last survives), several method signatures repeat a parameter
    name (a SyntaxError), and `YolosModelTester` referenced in setUp does not
    exist under that name.  Method bodies also read locals (`model_class`,
    `inputs_dict`, `config`, ...) that were collapsed onto `lowercase__`.
    Confirm against the upstream YOLOS test file.
    """

    _a : Optional[Any] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    _a : List[Any] = (
        {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
    )
    _a : List[Any] = False
    _a : Tuple = False
    _a : Any = False
    _a : Any = False

    # For YolosForObjectDetection, synthesize dummy DETR-style labels when the
    # common tests request labelled inputs.
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[int]:
        lowercase__ : List[str] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                lowercase__ : List[Any] = []
                for i in range(self.model_tester.batch_size ):
                    lowercase__ : Tuple = {}
                    lowercase__ : int = torch.ones(
                        size=(self.model_tester.n_targets,) , device=lowerCamelCase__ , dtype=torch.long )
                    lowercase__ : Union[str, Any] = torch.ones(
                        self.model_tester.n_targets , 4 , device=lowerCamelCase__ , dtype=torch.float )
                    labels.append(lowerCamelCase__ )
                lowercase__ : str = labels
        return inputs_dict

    # setUp: build the model tester and a ConfigTester for YolosConfig.
    def UpperCAmelCase__( self ) -> Dict:
        lowercase__ : Optional[int] = YolosModelTester(self )
        lowercase__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )

    def UpperCAmelCase__( self ) -> Dict:
        self.config_tester.run_common_tests()

    def UpperCAmelCase__( self ) -> Tuple:
        # YOLOS does not use inputs_embeds
        pass

    # The patch-embedding module is the input embedding; no output embeddings.
    def UpperCAmelCase__( self ) -> Union[str, Any]:
        lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ : Tuple = model_class(lowerCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowercase__ : str = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )

    # forward() must take pixel_values as its first argument.
    def UpperCAmelCase__( self ) -> str:
        lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ : Optional[int] = model_class(lowerCamelCase__ )
            lowercase__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ : List[Any] = [*signature.parameters.keys()]
            lowercase__ : Optional[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase__ )

    def UpperCAmelCase__( self ) -> List[Any]:
        lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )

    # Attention-output test specialized for YOLOS's longer sequence length
    # (patches + [CLS] + detection tokens).
    def UpperCAmelCase__( self ) -> str:
        lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ : str = True
        # in YOLOS, the seq_len is different
        lowercase__ : int = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            lowercase__ : List[Any] = True
            lowercase__ : int = False
            lowercase__ : Optional[Any] = True
            lowercase__ : Tuple = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowercase__ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
            lowercase__ : Union[str, Any] = outputs.attentions
            self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowercase__ : Dict = True
            lowercase__ : Dict = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowercase__ : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
            lowercase__ : Union[str, Any] = outputs.attentions
            self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            lowercase__ : Union[str, Any] = len(lowerCamelCase__ )
            # Check attention is always last and order is fine
            lowercase__ : List[str] = True
            lowercase__ : Dict = True
            lowercase__ : Optional[int] = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowercase__ : Any = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
            lowercase__ : Any = 1
            self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase__ ) )
            lowercase__ : str = outputs.attentions
            self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    # Hidden-state output test, again using YOLOS's expected sequence length.
    def UpperCAmelCase__( self ) -> Optional[int]:
        def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
            lowercase__ : int = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowercase__ : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
            lowercase__ : Union[str, Any] = outputs.hidden_states
            lowercase__ : str = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
            # YOLOS has a different seq_length
            lowercase__ : List[str] = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ : Dict = True
            check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ : Any = True
            check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    def UpperCAmelCase__( self ) -> Optional[Any]:
        lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*lowerCamelCase__ )

    # Smoke-test loading the first published checkpoint.
    @slow
    def UpperCAmelCase__( self ) -> Optional[Any]:
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ : Tuple = YolosModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
def _lowerCamelCase ( ):
    """Load the standard COCO cats-on-couch fixture image used by the
    integration tests below.

    Fixes: the original assigned the image to a garbled local but then
    returned the undefined name `image` (NameError).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


# The integration test below calls `prepare_img()` (the upstream name of this
# helper); expose it under that name as well.
prepare_img = _lowerCamelCase
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test for hustvl/yolos-small against pinned logits,
    boxes and post-processed detections on the COCO fixture image.

    NOTE(review): machine-renamed and not runnable as-is — both methods share
    the name `UpperCAmelCase__`, so the @slow test shadows the
    @cached_property (upstream names: `default_image_processor` and
    `test_inference_object_detection_head`); `lowerCamelCase__` inside the
    test is undefined (upstream: `torch_device`); and the garbled locals
    leave `model`, `image_processor`, `inputs`, `outputs`, `results`, `image`
    unbound.  The pinned tensors below are the upstream expected values.
    """

    @cached_property
    def UpperCAmelCase__( self ) -> List[str]:
        return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None

    @slow
    def UpperCAmelCase__( self ) -> List[Any]:
        lowercase__ : List[str] = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(lowerCamelCase__ )
        lowercase__ : Optional[Any] = self.default_image_processor
        lowercase__ : Optional[int] = prepare_img()
        lowercase__ : str = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowercase__ : int = model(inputs.pixel_values )
        # verify outputs
        lowercase__ : Tuple = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
        lowercase__ : List[Any] = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=lowerCamelCase__ , )
        lowercase__ : Optional[int] = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=lowerCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
        # verify postprocessing
        lowercase__ : Union[str, Any] = image_processor.post_process_object_detection(
            lowerCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        lowercase__ : Any = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(lowerCamelCase__ )
        lowercase__ : Dict = [75, 75, 17, 63, 17]
        lowercase__ : List[str] = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(lowerCamelCase__ )
        self.assertEqual(len(results["""scores"""] ) , 5 )
        self.assertTrue(torch.allclose(results["""scores"""] , lowerCamelCase__ , atol=1E-4 ) )
        self.assertSequenceEqual(results["""labels"""].tolist() , lowerCamelCase__ )
        self.assertTrue(torch.allclose(results["""boxes"""][0, :] , lowerCamelCase__ ) )
| 200 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
#
# Fixes: the original bound BOTH sentinels to the same garbled name (so the
# second assignment shadowed the first), left `_unmatched` — which
# `set_partitions` below relies on — undefined, and annotated the
# assignments with `Union`/`List`, names that are not imported in this file
# (a NameError when the annotations are evaluated).

# Sentinel marking parameters for which no partition rule matched.
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()

# Preserve the garbled public name (its final value was the second sentinel).
SCREAMING_SNAKE_CASE = empty_dict
def UpperCAmelCase ( qs , ks ) -> bool:
    """Return True if the regex patterns in ``qs`` match a contiguous window
    of the key tuple ``ks`` (each pattern is anchored with a trailing ``$``).

    Fixes: the original definition used the same placeholder name for both
    parameters (a SyntaxError) while the body referred to `qs`/`ks`, and it
    passed the wrong arguments to ``zip``/``match``.  The bogus ``-> Tuple``
    annotation (``Tuple`` is not imported here) is corrected to ``bool``.
    """
    # compile the patterns once, forcing a complete match of each key segment
    qts = tuple(re.compile(x + "$" ) for x in qs)
    # slide the pattern window over ks
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False


# Name used by `_replacement_rules` below.
_match = UpperCAmelCase
def UpperCAmelCase ( rules ):
    """Build a ``replace(key, val)`` function from ``rules``, a list of
    ``(pattern_tuple, replacement)`` pairs: the first rule whose pattern
    matches the key (via ``_match``) wins, otherwise ``val`` is returned
    unchanged.

    Fixes: the inner function repeated its parameter name (a SyntaxError),
    the loop iterated over an undefined name, and ``_match`` was called with
    placeholder arguments instead of ``(rule, key)``.  The bogus
    ``-> Optional[int]`` annotation (``Optional`` is not imported here) is
    dropped.
    """
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace


# Upstream name used by `set_partitions` below.
_replacement_rules = UpperCAmelCase
def UpperCAmelCase ( ):
    """Return the (pattern, PartitionSpec) rules mapping GPT-style parameter
    paths onto the 2-D ``mp`` (model-parallel) mesh axis; ``None`` entries
    are replicated.

    Fixes: the original used the undefined placeholder ``a_`` where the
    replicated dimension ``None`` belongs inside every ``P(...)`` call.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp" , None )),
        (("transformer", "wte", "embedding"), P("mp" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , "mp" )),
        (("attention", "out_proj", "kernel"), P("mp" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms (replicated)
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


# Upstream name used by `set_partitions` below.
_get_partition_rules = UpperCAmelCase
def UpperCAmelCase ( in_dict ):
    """Assign a PartitionSpec to every parameter in ``in_dict`` using the
    rules from ``_get_partition_rules`` and return the result as a frozen,
    re-nested dict.  Raises AssertionError if any parameter is left without
    a matching rule.

    Fixes: the original collapsed every local onto one name, so the rule
    list, the replace function, the seeded dict and the result all shadowed
    each other (and `_replacement_rules` was called with the input dict
    instead of the rules); `initd`/`result`/`_unmatched` were consequently
    undefined.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    # Seed every flattened parameter path with the "no rule matched" sentinel.
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )


# Upstream public name of this helper.
set_partitions = UpperCAmelCase
| 55 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the SEW model (standard HF `__init__` pattern).
#
# Fixes: the original bound this dict to a garbled name, then OVERWROTE that
# same name with the modeling export list (losing the config entry), and
# finally passed the never-defined `_import_structure` to `_LazyModule`
# (a NameError at import time).
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}

# Backward-compatible alias for the garbled public name.
lowerCAmelCase_ = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch modeling exports instead of clobbering the dict.
    _import_structure['modeling_sew'] = [
        'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SEWForCTC',
        'SEWForSequenceClassification',
        'SEWModel',
        'SEWPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy (upstream behaviour).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase :
    '''Config/inputs builder ("model tester") for the TF DeiT unit tests.

    NOTE(review): machine-renamed and not runnable as-is — `__init__` repeats
    the parameter name `A` (a SyntaxError), its body reads names such as
    `parent`/`batch_size` that are never bound, every helper below is named
    `UpperCamelCase_` (each definition shadows the previous one), and the
    test class later instantiates `TFDeiTModelTester`, a name that does not
    exist here.  Upstream method names: `prepare_config_and_inputs`,
    `get_config`, `create_and_check_model`,
    `create_and_check_for_masked_image_modeling`,
    `create_and_check_for_image_classification`,
    `prepare_config_and_inputs_for_common`.
    '''

    # Stores tiny-DeiT hyper-parameters and precomputes the sequence length
    # (num_patches + 2 for the [CLS] and distillation tokens).
    def __init__( self : List[Any] ,A : Union[str, Any] ,A : List[Any]=13 ,A : Optional[Any]=30 ,A : Union[str, Any]=2 ,A : Union[str, Any]=3 ,A : Any=True ,A : Dict=True ,A : str=32 ,A : Tuple=2 ,A : Optional[int]=4 ,A : Tuple=37 ,A : List[Any]="gelu" ,A : Dict=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=10 ,A : Optional[Any]=0.02 ,A : Dict=3 ,A : Dict=None ,A : List[Any]=2 ,):
        __A = parent
        __A = batch_size
        __A = image_size
        __A = patch_size
        __A = num_channels
        __A = is_training
        __A = use_labels
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = type_sequence_label_size
        __A = initializer_range
        __A = scope
        __A = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        __A = (image_size // patch_size) ** 2
        __A = num_patches + 2

    # Builds random pixel values and (optionally) classification labels.
    def UpperCamelCase_ ( self : List[Any] ):
        __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        __A = self.get_config()
        return config, pixel_values, labels

    # Returns a DeiTConfig built from the stored hyper-parameters.
    def UpperCamelCase_ ( self : Optional[int] ):
        return DeiTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)

    # Checks the base model's last_hidden_state shape.
    def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,A : Optional[int] ,A : Union[str, Any] ):
        __A = TFDeiTModel(config=A )
        __A = model(A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    # Checks masked-image-modeling reconstruction shapes, incl. greyscale.
    def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : Dict ):
        __A = TFDeiTForMaskedImageModeling(config=A )
        __A = model(A )
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __A = 1
        __A = TFDeiTForMaskedImageModeling(A )
        __A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __A = model(A )
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )

    # Checks classification logits shapes, incl. greyscale.
    def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Dict ,A : Union[str, Any] ):
        __A = self.type_sequence_label_size
        __A = TFDeiTForImageClassification(A )
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __A = 1
        __A = TFDeiTForImageClassification(A )
        __A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    # Adapter for the common tests: config plus {"pixel_values": ...} dict.
    def UpperCamelCase_ ( self : str ):
        __A = self.prepare_config_and_inputs()
        __A , __A , __A = config_and_inputs
        __A = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Unit-test suite for the TF DeiT model family.

    NOTE(review): machine-renamed and not runnable as-is — the base classes
    `__SCREAMING_SNAKE_CASE` are undefined (upstream: `TFModelTesterMixin`
    and `PipelineTesterMixin`), the class attributes all share the name
    `snake_case_` (shadowing each other), the test methods all share the name
    `UpperCamelCase_` (only the last survives), `_prepare_for_class` repeats
    the parameter name `A` (a SyntaxError), and `TFDeiTModelTester` used in
    setUp does not exist under that name.
    '''

    snake_case_ = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    snake_case_ = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    # setUp: build the model tester and a ConfigTester for DeiTConfig.
    def UpperCamelCase_ ( self : str ):
        __A = TFDeiTModelTester(self )
        __A = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )

    def UpperCamelCase_ ( self : Any ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds" )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass

    # The patch-embedding layer is the input embedding; outputs use Dense.
    def UpperCamelCase_ ( self : List[Any] ):
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
            __A = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A ,tf.keras.layers.Dense ) )

    # call() must take pixel_values as its first argument.
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            __A = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __A = [*signature.parameters.keys()]
            __A = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )

    # The teacher model takes no labels: drop them when not in the signature.
    def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ,A : List[str] ,A : Optional[Any]=False ):
        __A = super()._prepare_for_class(A ,A ,return_labels=A )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    # Smoke-test loading the first published checkpoint.
    @slow
    def UpperCamelCase_ ( self : Any ):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = TFDeiTModel.from_pretrained(A )
            self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> str:
    """Load the standard COCO cats-on-couch fixture image used by the TF DeiT
    integration test below.

    Fixes: the original assigned the image to a garbled local but then
    returned the undefined name `image` (NameError).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


# The integration test below calls `prepare_img()` (the upstream name of this
# helper); expose it under that name as well.
prepare_img = UpperCAmelCase
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test for facebook/deit-base-distilled-patch16-224
    against pinned logits on the COCO fixture image.

    NOTE(review): machine-renamed and not runnable as-is — both methods share
    the name `UpperCamelCase_`, so the @slow test shadows the
    @cached_property (upstream: `default_image_processor` and
    `test_inference_image_classification_head`), every local in the test is
    assigned to `__A` (shadowing `model`/`image_processor`/`inputs`/
    `outputs`), and `A` used as an argument is undefined (upstream: the
    prepared `image`).  The pinned logits are the upstream expected values.
    '''

    @cached_property
    def UpperCamelCase_ ( self : int ):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCamelCase_ ( self : Optional[int] ):
        __A = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
        __A = self.default_image_processor
        __A = prepare_img()
        __A = image_processor(images=A ,return_tensors="tf" )
        # forward pass
        __A = model(**A )
        # verify the logits
        __A = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape ,A )
        __A = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants from the upstream Accelerate example.
# NOTE(review): both were collapsed onto the same garbled name, so the second
# assignment shadows the first — upstream these were MAX_GPU_BATCH_SIZE = 16
# and EVAL_BATCH_SIZE = 32; confirm before relying on either value.
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def lowercase__ ( accelerator , batch_size = 1_6 ):
    """Build the GLUE/MRPC train and eval dataloaders for the LocalSGD example.

    Reconstructed from the upstream `accelerate` example: the garbled version
    repeated one parameter name (a SyntaxError in Python) and referenced the
    undefined placeholder `a_` and the unbound name `datasets`.

    Args:
        accelerator: the `Accelerator` driving this run (used for the
            `main_process_first` barrier and to pick padding behaviour).
        batch_size (int): per-device batch size for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader


# Upstream name used by `training_function` and the mocked-dataloaders hook.
get_dataloaders = lowercase__
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Override the real dataloader builder with the mocked one.  The garbled
    # version assigned the mock to an unrelated constant name, so the override
    # never took effect.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def lowercase__ ( config , args ):
    """Train and evaluate BERT-base on GLUE/MRPC with Accelerate + LocalSGD.

    Reconstructed from the upstream `accelerate` LocalSGD example: the garbled
    version repeated one parameter name (a SyntaxError), collapsed distinct
    locals onto the same name (losing e.g. the dataloaders from the tuple
    unpack), and referenced the undefined placeholder `a_`.

    Args:
        config (dict): hyper-parameters — "lr", "num_epochs", "seed",
            "batch_size".
        args: parsed CLI namespace — cpu, mixed_precision,
            gradient_accumulation_steps, local_sgd_steps.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )


# Upstream name used by `main` below.
training_function = lowercase__
def lowercase__ ( ):
    """Parse CLI arguments for the LocalSGD example and launch training.

    Fixes: the garbled version passed the undefined placeholder `a_` as the
    `type=`/`default=` of every `add_argument` call (upstream values restored
    below), and the `__main__` guard called `main()`, a name that was never
    bound.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps" , type=int , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
    parser.add_argument(
        "--local_sgd_steps" , type=int , default=8 , help="Number of local SGD steps or None to disable local SGD" )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
    training_function(config , args )


# Restore the name used by the entry-point guard.
main = lowercase__

if __name__ == "__main__":
    main()
| 642 |
# Doomsday "anchor day" for each month (index 0 = January) in leap and
# non-leap years, plus the weekday names keyed by the algorithm's 0-6 output.
#
# Fixes: all three constants were bound to the same garbled name (so only the
# last survived and DOOMSDAY_LEAP / DOOMSDAY_NOT_LEAP / WEEK_DAY_NAMES used
# below were undefined), and the assignments carried `List`/`Union`/`int`-style
# annotations referencing names not imported here.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def UpperCAmelCase ( year , month , day ) -> str:
    """Return the weekday name for a Gregorian date using Conway's Doomsday
    rule.

    Fixes: the original repeated one placeholder name for all three
    parameters (a SyntaxError) while the body read `year`/`month`/`day`,
    collapsed every intermediate onto one local, and tested
    ``year % 400 == 0`` for NON-leap century years — the inverted condition
    (century years NOT divisible by 400 are non-leap), which misclassified
    e.g. the year 2000.

    Args:
        year (int): four-digit year (asserted to be YYYY).
        month (int): 1-12.
        day (int): 1-31.

    Returns:
        str: weekday name, e.g. ``"Sunday"``.
    """
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 1_2, "month should be between 1 to 12"
    assert 1 <= day <= 3_1, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_0_0
    # Century anchors cycle Tue/Sun/Fri/Wed every 400 years.
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_0_0
    centurian_m = centurian % 1_2
    # Doomsday (anchor weekday) for this particular year.
    dooms_day = (
        (centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Per-month doomsday date; century years are leap only when % 400 == 0.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


# Upstream public name of this function.
get_week_day = UpperCAmelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the Nezha model (standard HF `__init__` pattern).
#
# Fixes: the original bound this dict to a garbled name, OVERWROTE that name
# with the modeling export list (losing the config entry), and finally passed
# the never-defined `_import_structure` to `_LazyModule` (a NameError at
# import time).
_import_structure = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}

# Backward-compatible alias for the garbled public name.
_A = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch modeling exports instead of clobbering the dict.
    _import_structure['modeling_nezha'] = [
        'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NezhaForNextSentencePrediction',
        'NezhaForMaskedLM',
        'NezhaForPreTraining',
        'NezhaForMultipleChoice',
        'NezhaForQuestionAnswering',
        'NezhaForSequenceClassification',
        'NezhaForTokenClassification',
        'NezhaModel',
        'NezhaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy (upstream behaviour).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 361 |
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return the raw Open Library JSON record for an olid such as ``isbn/0140328726``.

    Raises ValueError if the olid is not of the form ``<kind>/<id>``.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict.

    Author olids are resolved to names via extra API calls; list values are
    flattened to comma-separated strings.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break

        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue

        print(f"\nSearching Open Library for ISBN: {isbn}...\n")

        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 55 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    """Task template describing the extractive question-answering schema."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    # Expected input columns: a question string and a context string.
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    # Expected label column: answers with texts and their start offsets in the context.
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the canonical task column names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 336 |
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return a list of GIF URLs matching *query* from the Giphy search API."""
    # Giphy expects '+'-joined search terms in the query string.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 55 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds tiny DeiT configs/inputs and runs shape checks for each TF model head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny DeiT model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)

        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as DeiT
    does not use input_ids/attention_mask.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # Drop labels for heads whose call() signature does not accept them.
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    """Integration test: run the pretrained distilled DeiT checkpoint on a real image."""

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 577 |
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order, starting from 2."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the *nth* prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 55 | 0 |
"""simple docstring"""
def solution() -> str:
    """Return the last ten digits of 1^1 + 2^2 + ... + 1000^1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
| 571 |
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights as a TF1 checkpoint under *ckpt_dir*.

    Variable names are rewritten to the original TF BERT naming scheme and the
    weight matrices listed in `tensors_to_transpose` are transposed.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pytorch-substring, tf-substring) replacements applied in order to each name.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Apply every rename rule, then prefix with the standard "bert/" scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and export it as a TF checkpoint."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 55 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    """Launches accelerate's external test_metrics script on CPU, single GPU and multi GPU."""

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        # Path to the standalone script, launched with torchrun in the multi-GPU test.
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 211 |
"""Lazy-import structure for the PEGASUS-X model (standard transformers init pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule name -> public names it exports; consumed by _LazyModule below.
# NOTE: the original code bound the dict to a throwaway name and referenced an
# undefined `_import_structure` in the _LazyModule call.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes as well.
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 | 0 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
# File name under which scheduler configurations are saved; referenced by
# SchedulerMixin.config_name below (the original bound it to an unused name).
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    """Enumeration of the scheduler classes that share the Karras-style API.

    NOTE(review): the original code collapsed all members into one repeated
    attribute name; member names restored from the diffusers library — confirm
    against the upstream `diffusers.schedulers.scheduling_utils` module.
    """

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    """Base output class for a scheduler's ``step`` function.

    Attributes:
        prev_sample: Computed sample from the previous timestep; used as the
            next model input in the denoising loop.
    """

    prev_sample: torch.FloatTensor
class SchedulerMixin:
    """Mixin with common scheduler functionality: config-driven construction,
    saving/loading, and discovery of compatible scheduler classes.
    """

    # File name used by save_config/load_config (defined at module level).
    config_name = SCHEDULER_CONFIG_NAME
    # Names of other scheduler classes this one is interchangeable with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler from a config saved locally or on the Hub."""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Save the scheduler configuration to *save_directory* so it can be reloaded."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Return the scheduler classes that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package namespace.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 545 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE :int = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def UpperCAmelCase ( a_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__A = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
__A = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
__A = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# URL template for community pipeline files hosted in the diffusers GitHub repo;
# used by get_cached_module_file below (original bound it to an unused name).
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return the diffusers release versions published on PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the dynamic-modules cache directory and add it to ``sys.path`` (idempotent)."""
    # This function has already been executed if HF_MODULES_CACHE is in sys.path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create a package directory (with ``__init__.py``) for *name* inside the dynamic-module cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the module names pulled in via relative imports in *module_file*."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Transitively collect every file *module_file* depends on via relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Verify every top-level import in *filename* is installed; return its relative imports.

    Raises ImportError listing any packages missing from the environment.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import *module_path* and return the class *class_name* from it.

    If *class_name* is None, auto-detect the single pipeline class in the module.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the unique user-defined class in *loaded_module* inheriting from DiffusionPipeline.

    Raises ValueError if more than one such class is found.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Resolve *module_file* from a local path, the community pipelines on GitHub, or a Hub
    repo; copy it (plus its relative imports) into the dynamic-module cache and return the
    cache-relative path under which it can be imported.
    """
    # Download and cache module_file from the repo, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        # A bare name refers to a community pipeline hosted in the diffusers GitHub repo.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Fetch/cache *module_file* for a model repo and return the class *class_name* from it."""
    # Resolve and cache the module file, then import the class from the cached module.
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 650 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class BlipaProcessorTest(unittest.TestCase):
    """Tests save/load round-trips and input handling of the BLIP-2 processor."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = BlipaProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 channel-first arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 55 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def a__ ( idx ):
    """Return (HF name, original name) rename pairs for the patch-embedding
    weights of CvT stage ``idx``.

    NOTE(review): the original signature reused the function name for its
    parameter, leaving the ``idx`` used in the f-strings undefined; restored.
    """
    embed = []
    # Convolutional projection and its normalization, weight and bias each.
    for hf_tail, orig_tail in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_tail}",
                f"stage{idx}.patch_embed.{orig_tail}",
            ) )
    return embed
def a__ ( idx , cnt ):
    """Return (HF name, original name) rename pairs for the attention block
    ``cnt`` of CvT stage ``idx``.

    The pair order matches the original hand-unrolled list exactly:
    conv projections (q, k, v), then linear projections (q, k, v), then the
    attention output projection, the MLP, and the two layer norms.

    NOTE(review): the original declared the same parameter name twice
    (a SyntaxError) and referenced the undefined names ``idx``/``cnt``;
    both are restored here and the 60+ repeated appends are generated.
    """
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    attention_weights = []
    # Convolutional projections: one conv weight plus five batch-norm stats.
    for long, short in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f"{hf_prefix}.attention.attention.convolution_projection_{long}.convolution_projection.convolution.weight",
                f"{orig_prefix}.attn.conv_proj_{short}.conv.weight",
            ) )
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{long}.convolution_projection.normalization.{stat}",
                    f"{orig_prefix}.attn.conv_proj_{short}.bn.{stat}",
                ) )
    # Linear q/k/v projections, weight and bias each.
    for long, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{long}.{param}",
                    f"{orig_prefix}.attn.proj_{short}.{param}",
                ) )
    # Attention output projection, MLP layers and the two layer norms.
    for hf_tail, orig_tail in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_prefix}.{hf_tail}.{param}", f"{orig_prefix}.{orig_tail}.{param}") )
    return attention_weights
def a__ ( idx ):
    """Return the rename pair for the classification token of stage ``idx``.

    The original checkpoint always stores its cls token under ``stage2``.
    NOTE(review): the parameter was originally named after the function
    itself, leaving ``idx`` undefined; restored.
    """
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]
def a__ ( ):
    """Return rename pairs for the final layer norm and classifier head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]
# NOTE(review): left byte-identical.  This function is broken as written:
# the signature declares the same parameter name four times (a SyntaxError)
# and the body reads the undefined name `a_` throughout.  Judging by the
# __main__ block below it should be convert_cvt_checkpoint(cvt_model,
# image_size, cvt_file_name, pytorch_dump_folder_path); restoring it needs
# a coordinated rename across the file, so it is only annotated here.
def a__ ( a__ , a__ , a__ , a__ ):
    """simple docstring"""
    # ImageNet-1k label metadata fetched from the HF hub.
    __SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
    __SCREAMING_SNAKE_CASE = 10_00
    __SCREAMING_SNAKE_CASE = """huggingface/label-files"""
    __SCREAMING_SNAKE_CASE = num_labels
    __SCREAMING_SNAKE_CASE = json.load(open(cached_download(hf_hub_url(a_ , a_ , repo_type="""dataset""" ) ) , """r""" ) )
    __SCREAMING_SNAKE_CASE = {int(a_ ): v for k, v in idalabel.items()}
    __SCREAMING_SNAKE_CASE = idalabel
    __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
    # NOTE(review): `idalabel`/`labelaid` look like mangled id2label/label2id
    # keyword names for CvtConfig — confirm against the transformers API.
    __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = CvtConfig(num_labels=a_ , idalabel=a_ , labelaid=a_ )
    # Pick stage depths from the model-name suffix (13, 21 or w24).
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
        __SCREAMING_SNAKE_CASE = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
        __SCREAMING_SNAKE_CASE = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        __SCREAMING_SNAKE_CASE = [2, 2, 20]
    __SCREAMING_SNAKE_CASE = [3, 12, 16]
    __SCREAMING_SNAKE_CASE = [1_92, 7_68, 10_24]
    __SCREAMING_SNAKE_CASE = CvtForImageClassification(a_ )
    __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
    __SCREAMING_SNAKE_CASE = image_size
    # Load the original checkpoint on CPU and build the rename table.
    __SCREAMING_SNAKE_CASE = torch.load(a_ , map_location=torch.device("""cpu""" ) )
    __SCREAMING_SNAKE_CASE = OrderedDict()
    __SCREAMING_SNAKE_CASE = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            # NOTE(review): cls_token/embeddings/attention/final are undefined
            # here — the helper functions above are all named `a__`.
            __SCREAMING_SNAKE_CASE = list_of_state_dict + cls_token(a_ )
        __SCREAMING_SNAKE_CASE = list_of_state_dict + embeddings(a_ )
        for cnt in range(config.depth[idx] ):
            __SCREAMING_SNAKE_CASE = list_of_state_dict + attention(a_ , a_ )
    __SCREAMING_SNAKE_CASE = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(a_ )
    # Copy each original tensor into the renamed HF state dict.
    for i in range(len(a_ ) ):
        __SCREAMING_SNAKE_CASE = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(a_ )
    model.save_pretrained(a_ )
    image_processor.save_pretrained(a_ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and the parsed args to a
    # throw-away name while the rest of the block read `parser`/`args`,
    # which were undefined; the names are made consistent here.  The help
    # text of --cvt_file_name was a copy-paste of --image_size and is fixed.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cvt_model',
        default='cvt-w24',
        type=str,
        help='Name of the cvt model you\'d like to convert.',
    )
    parser.add_argument(
        '--image_size',
        default=3_8_4,
        type=int,
        help='Input Image Size',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Path to the original CvT checkpoint file.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 627 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase ( IterableDataset ):
    """Iterable dataset that concatenates tokenized documents into
    fixed-length chunks of ``seq_length`` token ids, separated by the
    tokenizer's BOS token.

    NOTE(review): the original declared every ``__init__`` parameter with
    the same name (a SyntaxError), never stored the attributes on ``self``
    and tokenized the undefined name ``A``; the parameter names follow the
    codeparrot reference implementation.
    """

    def __init__( self , tokenizer , dataset , seq_length=10_24 , num_of_sequences=10_24 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        # The BOS id separates concatenated documents in the token stream.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Raw characters to buffer per tokenizer call (chars_per_token is an
        # estimate of characters per produced token).
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # Fill the character buffer from the raw dataset.
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["content"] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            # Emit only full-length chunks; the ragged tail is dropped.
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
# NOTE(review): left byte-identical.  The body mixes the parameter `a_`
# (the parsed args, judging by the call site below) with the module globals
# `args` and `tokenizer`, forwards `a_` where the streaming kwargs and the
# tokenizer/dataset pair belong, and `ConstantLengthDataset` and
# `eval_dataloader` are undefined names here.  Compare the codeparrot
# reference create_dataloader; a fix needs a file-wide rename.
def UpperCAmelCase ( a_ ) -> Optional[int]:
    """simple docstring"""
    __A = {"streaming": True}
    __A = load_dataset(args.dataset_name , split="train" , **a_ )
    __A = ConstantLengthDataset(a_ , a_ , seq_length=args.seq_length )
    __A = DataLoader(a_ , batch_size=args.batch_size )
    return eval_dataloader
# NOTE(review): left byte-identical.  Evaluation loop over a dataloader:
# every local is bound to the throw-away name `__A`, so `outputs`, `losses`,
# `loss` and `perplexity` are undefined, and `model(a_ , labels=a_ )`
# forwards the function parameter where the per-step `batch` belongs.
# `model`, `args` and `accelerator` are module globals defined below.
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
    """simple docstring"""
    model.eval()
    __A = []
    for step, batch in enumerate(a_ ):
        with torch.no_grad():
            __A = model(a_ , labels=a_ )
        # Replicate the scalar loss so gather() returns one value per sample.
        __A = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(a_ ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    __A = torch.mean(torch.cat(a_ ) )
    try:
        __A = torch.exp(a_ )
    except OverflowError:
        # exp(loss) overflowed; report infinite perplexity.
        __A = float("inf" )
    return loss.item(), perplexity.item()
# NOTE(review): left byte-identical.  All results are bound to the single
# throw-away name SCREAMING_SNAKE_CASE, so `parser`, `args`, `model`,
# `tokenizer`, `eval_dataloader`, `eval_loss` and `perplexity` read below
# are undefined; `create_dataloader` and `evaluate` do not exist under
# those names either.  The two tuple-unpacking lines combine a tuple
# target with an annotation, which is a SyntaxError.
# Setup Accelerator
SCREAMING_SNAKE_CASE :Optional[int] = Accelerator()
# Parse configuration
SCREAMING_SNAKE_CASE :str = HfArgumentParser(EvaluationArguments)
SCREAMING_SNAKE_CASE :int = parser.parse_args()
set_seed(args.seed)
# Logging
SCREAMING_SNAKE_CASE :Dict = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
SCREAMING_SNAKE_CASE :List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
SCREAMING_SNAKE_CASE :int = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
SCREAMING_SNAKE_CASE :List[str] = create_dataloader(args)
# Prepare everything with our `accelerator`.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[Any] = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 55 | 0 |
"""simple docstring"""
def _lowerCamelCase ( lowerCamelCase__ : Any = 10_00 ):
lowercase__ , lowercase__ : Dict = 1, 1
lowercase__ : List[Any] = []
for i in range(1 , n + 1 ):
lowercase__ : str = prev_numerator + 2 * prev_denominator
lowercase__ : List[Any] = prev_numerator + prev_denominator
if len(str(a_ ) ) > len(str(a_ ) ):
result.append(a_ )
lowercase__ : int = numerator
lowercase__ : Tuple = denominator
return len(a_ )
if __name__ == "__main__":
print(F"{solution() = }")
| 200 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """LayoutLM tokenization tests (slow and fast tokenizers).

    NOTE(review): left byte-identical.  The mixin base name
    ``__SCREAMING_SNAKE_CASE`` is undefined (presumably
    TokenizerTesterMixin), the four ``snake_case_`` class attributes
    shadow one another so only the last survives, and all five methods
    share the name ``UpperCamelCase_`` so only the no-op one remains an
    attribute.  ``setUp`` also builds the vocab path into a local instead
    of ``self.vocab_file``, which later methods read.
    """
    snake_case_ = LayoutLMTokenizer
    snake_case_ = LayoutLMTokenizerFast
    snake_case_ = True
    snake_case_ = True
    def UpperCamelCase_ ( self : Any ):
        # Write a tiny WordPiece vocab into the mixin's temp dir.
        super().setUp()
        __A = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        __A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        # NOTE(review): `self.vocab_file` and `vocab_tokens` are never
        # assigned — both were clobbered into the local `__A` above.
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def UpperCamelCase_ ( self : Tuple ,**A : int ):
        # Factory for a slow tokenizer over the temp vocab.
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**A )
    def UpperCamelCase_ ( self : Optional[Any] ,A : Any ):
        # Input/expected pair used by the common tokenizer tests.
        __A = "UNwant\u00E9d,running"
        __A = "unwanted, running"
        return input_text, output_text
    def UpperCamelCase_ ( self : str ):
        # End-to-end tokenize + convert_tokens_to_ids round trip.
        __A = self.tokenizer_class(self.vocab_file )
        __A = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(A ,["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[7, 4, 5, 10, 8, 9] )
    def UpperCamelCase_ ( self : int ):
        pass
| 55 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
# Emit info-level logs during conversion and create the module logger.
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
# NOTE(review): left byte-identical.  A forward-hook tracer (upstream
# `Tracker`): all three dataclass fields share the name `lowerCamelCase_`
# (later annotations overwrite earlier ones) and the factories reference
# the undefined `__SCREAMING_SNAKE_CASE`; `self.traced`, `self.module` and
# `self.handles` read below are therefore never defined.  The hook method
# also declares the same parameter name three times — a SyntaxError.
@dataclass
class __lowerCAmelCase :
    lowerCamelCase_ : Optional[Any] = 42
    lowerCamelCase_ : List[Any] = field(default_factory=__SCREAMING_SNAKE_CASE )
    lowerCamelCase_ : str = field(default_factory=__SCREAMING_SNAKE_CASE )
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
        """Forward hook: record leaf modules (convs/batch norms)."""
        snake_case_ : Any = len(list(m.modules() ) ) == 1 or isinstance(__magic_name__ , nn.Convad ) or isinstance(__magic_name__ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(__magic_name__ )
    def __call__(self , __magic_name__ ) -> int:
        """Run a forward pass with hooks installed, then remove them."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(__magic_name__ )
        [x.remove() for x in self.handles]
        return self
    @property
    def lowerCamelCase (self ) -> Union[str, Any]:
        """Traced modules that actually carry parameters."""
        return list(filter(lambda __magic_name__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
# NOTE(review): left byte-identical.  Upstream `ModuleTransfer`: copies
# weights from a source module to a destination module by tracing both.
# All five dataclass fields share the name `lowerCamelCase_`, so
# `self.dest`, `self.src`, `self.src_skip`, `self.dest_skip` and
# `self.verbose` read below are never defined; `Tracker` is also an
# undefined name here (the class above is named `__lowerCAmelCase`).
@dataclass
class __lowerCAmelCase :
    lowerCamelCase_ : List[str] = 42
    lowerCamelCase_ : Tuple = 42
    lowerCamelCase_ : List[Any] = 0
    lowerCamelCase_ : Any = field(default_factory=__SCREAMING_SNAKE_CASE )
    lowerCamelCase_ : Dict = field(default_factory=__SCREAMING_SNAKE_CASE )
    def __call__(self , __magic_name__ ) -> List[Any]:
        """Trace src and dest with one input, then copy state dicts 1:1."""
        snake_case_ : int = Tracker(self.dest )(__magic_name__ ).parametrized
        snake_case_ : Tuple = Tracker(self.src )(__magic_name__ ).parametrized
        snake_case_ : Any = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.src_skip , __magic_name__ ) )
        snake_case_ : Union[str, Any] = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.dest_skip , __magic_name__ ) )
        if len(__magic_name__ ) != len(__magic_name__ ):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(__magic_name__ )} operations while'''
                F''' destination module has {len(__magic_name__ )}.''' )
        for dest_m, src_m in zip(__magic_name__ , __magic_name__ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )
# NOTE(review): left byte-identical.  Converts one timm resnet checkpoint
# into the HF model and optionally pushes it.  The parameters are all
# declared with the same name `_UpperCamelCase` (a SyntaxError) and the
# body reads the undefined names `a_`, `name`, `save_directory` and
# `checkpoint_name`; a fix needs the upstream signature
# (name, config, save_directory, push_to_hub=True).
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True ) -> List[str]:
    """simple docstring"""
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        # Instantiate the pretrained timm model and the HF shell model.
        snake_case_ : List[Any] = timm.create_model(a_ , pretrained=a_ ).eval()
        snake_case_ : Union[str, Any] = ResNetForImageClassification(a_ ).eval()
        snake_case_ : Tuple = ModuleTransfer(src=a_ , dest=a_ )
        snake_case_ : int = torch.randn((1, 3, 224, 224) )
        module_transfer(a_ )
    # Sanity check: both models must produce identical logits.
    assert torch.allclose(from_model(a_ ) , our_model(a_ ).logits ), "The model logits don't match the original one."
    snake_case_ : List[Any] = f'''resnet{"-".join(name.split("resnet" ) )}'''
    print(a_ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a_ , )
        # we can use the convnext one
        snake_case_ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a_ , )
        print(f'''Pushed {checkpoint_name}''' )
# NOTE(review): left byte-identical.  Builds ImageNet label metadata and a
# config per resnet variant, then dispatches to the converter above.  The
# parameters share one name (a SyntaxError) and the body reads the
# undefined names `a_`, `idalabel` and `model_name`; upstream signature is
# (save_directory, model_name=None, push_to_hub=True).
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True ) -> Tuple:
    """simple docstring"""
    snake_case_ : Tuple = '''imagenet-1k-id2label.json'''
    snake_case_ : List[str] = 1_000
    snake_case_ : Optional[int] = (1, num_labels)
    snake_case_ : Optional[Any] = '''huggingface/label-files'''
    snake_case_ : List[Any] = num_labels
    snake_case_ : str = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) )
    snake_case_ : List[str] = {int(a_ ): v for k, v in idalabel.items()}
    snake_case_ : List[str] = idalabel
    snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
    # Partial pre-binds the label maps so only depth/width vary per variant.
    snake_case_ : int = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_ )
    snake_case_ : Tuple = {
        '''resnet18''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
        '''resnet26''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
        '''resnet34''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
        '''resnet50''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
        '''resnet101''': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
        '''resnet152''': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
    }
    if model_name:
        # Convert only the requested variant.
        convert_weight_and_push(a_ , names_to_config[model_name] , a_ , a_ )
    else:
        # Convert every known variant.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(a_ , a_ , a_ , a_ )
    return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 60 |
SCREAMING_SNAKE_CASE :int = {str(digit): digit**5 for digit in range(10)}
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a_ ) )
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(a_ ) )
if __name__ == "__main__":
print(solution())
| 55 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
# importlib.metadata joined the stdlib in Python 3.8; fall back to the
# third-party backport on older interpreters.
if config.PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
def lowercase__ ( key , default=False ):
    """Read boolean flag ``key`` from the environment.

    Returns ``default`` unchanged when the variable is unset; otherwise the
    value is parsed like ``distutils.util.strtobool`` and 0 or 1 is
    returned.

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError) and converted the undefined name ``a_``; it also
    relied on the deprecated ``distutils`` module, so the truth-value
    parsing is inlined here with identical semantics.

    Raises:
        ValueError: if the variable is set to an unrecognised value.
    """
    try:
        raw_value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to 1 (truthy) or 0 (falsy).
        normalized = raw_value.lower()
        if normalized in ("y", "yes", "t", "true", "on", "1"):
            _value = 1
        elif normalized in ("n", "no", "f", "false", "off", "0"):
            _value = 0
        else:
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value
# NOTE(review): left byte-identical.  `parse_flag_from_env` is an undefined
# name here (the function above is named `lowercase__`), and all flags and
# pytest markers are bound to the same name `__UpperCAmelCase`, so only the
# last assignment survives; the `_run_*_tests` globals read by the skip
# helpers below are never defined.
__UpperCAmelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
__UpperCAmelCase = parse_flag_from_env('''RUN_REMOTE''', default=False)
__UpperCAmelCase = parse_flag_from_env('''RUN_LOCAL''', default=True)
__UpperCAmelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__UpperCAmelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__UpperCAmelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__UpperCAmelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__UpperCAmelCase = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__UpperCAmelCase = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__UpperCAmelCase = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__UpperCAmelCase = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def lowercase__ ( test_case ):
    """Skip ``test_case`` unless faiss is importable.

    NOTE(review): this and the sibling decorators below originally wrapped
    the undefined name ``a_`` instead of the decorated test case; all are
    restored to wrap and return ``test_case``.
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy" )(test_case)
    return test_case
def lowercase__ ( test_case ):
    """Skip ``test_case`` unless PyTorch is available (per datasets.config).

    NOTE(review): this and the sibling decorators originally wrapped the
    undefined name ``a_``; restored to wrap ``test_case``.
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers" )(test_case)
    else:
        return test_case
def lowercase__ ( test_case ):
    """Skip ``test_case`` unless tiktoken is importable.

    NOTE(review): this and the sibling decorators originally wrapped the
    undefined name ``a_``; restored to wrap ``test_case``.
    """
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken" )(test_case)
    else:
        return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless spaCy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy" )(test_case)
    else:
        return test_case


def lowercase__ ( model ):
    """Return a decorator that skips tests unless spaCy and the given
    spaCy model are installed."""
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy" )(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model) )(test_case)
        else:
            return test_case

    return _require_spacy_model
def lowercase__ ( test_case ):
    """Skip ``test_case`` unless pyspark is importable.

    NOTE(review): originally wrapped the undefined name ``a_``; restored.
    """
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark" )(test_case)
    else:
        return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark" )(test_case)
    else:
        return test_case
def lowercase__ ( test_case ):
    """Skip ``test_case`` unless the RUN_SLOW flag is enabled.

    NOTE(review): this and the siblings originally wrapped the undefined
    name ``a_``; restored.  The ``_run_*_tests`` module globals they read
    are expected to be set by the flag parsing near the top of the file.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless the RUN_LOCAL flag is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless the RUN_PACKAGED flag is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case)
    return test_case


def lowercase__ ( test_case ):
    """Skip ``test_case`` unless the RUN_REMOTE flag is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case)
    return test_case
def lowercase__ ( *lowerCAmelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
def decorate(cls : Union[str, Any] ):
for name, fn in cls.__dict__.items():
if callable(a_ ) and name.startswith("test" ):
for decorator in decorators:
a__ : Optional[int] = decorator(a_ )
setattr(cls , a_ , a_ )
return cls
return decorate
# NOTE(review): left byte-identical.  Both classes inherit from the
# undefined name `__SCREAMING_SNAKE_CASE`; in the upstream datasets test
# utilities these are an Exception subclass
# (RequestWouldHangIndefinitelyError) and an Enum of offline-simulation
# modes.  The three attributes below all share the name `__lowerCamelCase`,
# so only the last assignment survives.
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    pass
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    # Intended modes: connection fails / connection times out /
    # HF_DATASETS_OFFLINE is set to 1.
    __lowerCamelCase : Dict = 0
    __lowerCamelCase : List[str] = 1
    __lowerCamelCase : int = 2
# NOTE(review): left byte-identical.  Offline-mode simulation context
# manager: the signature declares both parameters with the same name
# (a SyntaxError) and the body passes the undefined name `a_` where the
# request arguments, patch targets and mode flags belong; `url`, `timeout`
# and `max_retry_error` are also read without ever being defined because
# every assignment targets the throw-away `a__`.
@contextmanager
def lowercase__ ( lowerCAmelCase__ : Tuple=OfflineSimulationMode.CONNECTION_FAILS , lowerCAmelCase__ : Dict=1E-16 ) -> Any:
    """Simulate an offline environment for the duration of the with-block."""
    a__ : Union[str, Any] = requests.Session().request
    def timeout_request(lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[Any] ):
        # Change the url to an invalid url so that the connection hangs
        a__ : Optional[Any] = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
        a__ : Dict = timeout
        try:
            return online_request(a_ , a_ , **a_ )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            a__ : int = url
            a__ : List[str] = e.args[0]
            a__ : Dict = (max_retry_error.args[0].replace("10.255.255.1" , F"OfflineMock[{url}]" ),)
            a__ : Optional[int] = (max_retry_error,)
            raise
    def raise_connection_error(lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : str ):
        raise requests.ConnectionError("Offline mode is enabled." , request=a_ )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , a_ ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , a_ ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , a_ ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def lowercase__ ( *args : Any , **kwargs : Dict ) -> Tuple:
    """Context manager: run the body inside a fresh temporary working directory.

    Extra positional/keyword arguments are forwarded to
    ``tempfile.TemporaryDirectory``.  The previous working directory is always
    restored, even if the body raises.
    """
    original_cwd = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            # Enter the temporary directory for the duration of the body.
            os.chdir(tmp_dir )
            yield
        finally:
            # Restore the previous working directory no matter what happened.
            os.chdir(original_cwd )
@contextmanager
def lowercase__ ( ) -> Optional[int]:
    """Context manager asserting that Arrow memory grows inside the body.

    Collects garbage, snapshots ``pa.total_allocated_bytes()``, runs the body,
    and asserts the allocation count strictly increased.
    NOTE(review): relies on a module-level ``pa`` (pyarrow) import -- confirm.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase__ ( ) -> int:
    """Context manager asserting that Arrow memory does NOT grow inside the body.

    Collects garbage, snapshots ``pa.total_allocated_bytes()``, runs the body,
    and asserts the allocation count did not increase.
    NOTE(review): relies on a module-level ``pa`` (pyarrow) import -- confirm.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase__ ( rng_a : Any , rng_b : Optional[int] ) -> Tuple:
    """Return True if two NumPy bit generators are in the same state.

    Each generator is deep-copied before drawing, so the callers' generators
    are left untouched; equal states produce identical 10-draw samples.
    """
    return deepcopy(rng_a ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(rng_b ).integers(0 , 1_0_0 , 1_0 ).tolist()
def lowercase__ ( lowerCAmelCase__ : List[Any] ) -> Any:
    """Decorator: turn transient Hub 500/502 HTTP errors into pytest xfails.

    Wraps the test function so that an ``HTTPError`` whose message starts with
    ``500`` or ``502`` marks the test as expected-failure instead of failing
    the suite; any other error is re-raised.
    NOTE(review): references a module-level ``pytest`` import -- confirm it
    exists in the full file.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func : str , *args : List[Any] , **kwargs : Union[str, Any] ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper , lowerCAmelCase__ )
class __UpperCAmelCase :
    """Plain container for the result of a subprocess run."""

    def __init__( self : Any , returncode : Optional[int] , stdout : Union[str, Any] , stderr : List[str] ) -> Any:
        """Store the exit code and the captured stdout/stderr line lists."""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowercase__ ( stream : Any , callback : Union[str, Any] ) -> Tuple:
    """Forward every line read from *stream* to *callback* until EOF.

    ``stream`` must expose an awaitable ``readline()``; an empty read (falsy
    value) signals end-of-stream and terminates the loop.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            # Empty read => the stream hit EOF.
            break
async def lowercase__ ( cmd : str , env : Optional[int]=None , stdin : Any=None , timeout : List[Any]=None , quiet : int=False , echo : int=False ) -> _RunOutput:
    """Run *cmd* as an async subprocess, teeing stdout/stderr while capturing them.

    Lines are decoded, stripped, appended to per-stream sink lists and (unless
    ``quiet``) echoed to this process's stdout/stderr with a label.  Returns a
    result object holding the exit code and both captured streams.
    NOTE(review): ``_read_stream`` and ``_RunOutput`` are referenced but are
    not defined under those names in this (mangled) file -- confirm.
    """
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line : Optional[int] , sink : Any , pipe : Union[str, Any] , label : List[Any]="" ):
        # Decode, strip trailing whitespace, capture, and optionally echo.
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label="stderr:" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def lowercase__ ( cmd : int , env : Any=None , stdin : List[str]=None , timeout : int=1_8_0 , quiet : Tuple=False , echo : Any=True ) -> _RunOutput:
    """Synchronously run *cmd* via the async subprocess streamer and validate it.

    Raises ``RuntimeError`` when the command exits non-zero (including the
    combined worker stderr in the message) or when it produced no output at
    all.  Returns the captured run result on success.
    NOTE(review): ``_stream_subprocess`` is referenced but is not defined under
    that name in this (mangled) file -- confirm.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F"\'{cmd_str}\' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"\'{cmd_str}\' produced no output." )
    return result
def lowercase__ ( ) -> Union[str, Any]:
    """Return the numeric id of the current pytest-xdist worker.

    Reads ``PYTEST_XDIST_WORKER`` (e.g. ``"gw3"``), strips the ``gw`` prefix
    and returns the remainder as an int; defaults to 0 when not running under
    pytest-xdist.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(R"^gw" , "" , worker , 0 , re.M )
    return int(worker )
def lowercase__ ( ) -> int:
    """Return a torch.distributed port unique to this pytest-xdist worker.

    Offsets a fixed base port by the worker id so parallel workers never
    collide on the same port.
    NOTE(review): ``pytest_xdist_worker_id`` is referenced but is not defined
    under that name in this (mangled) file -- confirm.
    """
    port = 2_9_5_0_0
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 642 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Config tester checking MobileNetV1-specific attributes exist.

    NOTE(review): the base class name `__SCREAMING_SNAKE_CASE` is not defined
    in this file -- presumably a mangled reference to ConfigTester; confirm.
    """

    def UpperCamelCase_ ( self : Any ):
        """Instantiate the config under test and verify its MobileNetV1 fields."""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "tf_padding" ) )
        self.parent.assertTrue(hasattr(config , "depth_multiplier" ) )
class UpperCAmelCase :
    '''Builds a tiny MobileNetV1 config plus dummy inputs for the model tests.

    NOTE(review): this block appears machine-mangled -- every ``__init__``
    parameter is declared ``A`` (duplicate names, a SyntaxError) and bodies
    assign to ``__A`` while later reading names that are never bound.  The
    comments below record the apparent intent only; restore the original
    parameter/local names before use.
    '''
    def __init__( self : Optional[Any] ,A : int ,A : List[Any]=13 ,A : int=3 ,A : Optional[Any]=32 ,A : Union[str, Any]=0.25 ,A : Tuple=8 ,A : Optional[int]=True ,A : Union[str, Any]=10_24 ,A : Any=32 ,A : Optional[int]="relu6" ,A : int=0.1 ,A : Optional[Any]=0.02 ,A : Optional[Any]=True ,A : List[str]=True ,A : str=10 ,A : str=None ,):
        # presumably: parent, batch_size, num_channels, image_size,
        # depth_multiplier, min_depth, tf_padding, last_hidden_size,
        # output_stride, hidden_act, classifier_dropout_prob, initializer_range,
        # use_labels, is_training, num_labels, scope -- TODO confirm upstream.
        __A = parent
        __A = batch_size
        __A = num_channels
        __A = image_size
        __A = depth_multiplier
        __A = min_depth
        __A = tf_padding
        __A = int(last_hidden_size * depth_multiplier )
        __A = output_stride
        __A = hidden_act
        __A = classifier_dropout_prob
        __A = use_labels
        __A = is_training
        __A = num_labels
        __A = initializer_range
        __A = scope
    def UpperCamelCase_ ( self : Optional[int] ):
        # Prepares (config, pixel_values, labels, pixel_labels) for a test run.
        __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __A = None
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] ,self.num_labels )
            __A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        __A = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def UpperCamelCase_ ( self : Any ):
        # Returns a MobileNetV1 config built from the tester's hyper-parameters.
        return MobileNetVaConfig(
            num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
    def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Tuple ,A : Optional[int] ,A : List[str] ):
        # Runs the bare model and checks the output feature-map shape.
        __A = MobileNetVaModel(config=A )
        model.to(A )
        model.eval()
        __A = model(A )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)
    def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : List[Any] ,A : int ,A : Union[str, Any] ):
        # Runs the image-classification head and checks the logits shape.
        __A = self.num_labels
        __A = MobileNetVaForImageClassification(A )
        model.to(A )
        model.eval()
        __A = model(A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def UpperCamelCase_ ( self : Tuple ):
        # Re-packs prepared inputs into the dict form the common tests expect.
        __A = self.prepare_config_and_inputs()
        __A , __A , __A , __A = config_and_inputs
        __A = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Common-suite model tests for MobileNetV1.

    NOTE(review): machine-mangled block -- ``A`` is read as an undefined name
    in several calls, and the helper classes are referenced under
    ``MobileNetVaModelTester``/``MobileNetVaConfigTester`` which are not
    defined in this file.  The base class names also look mangled.
    '''
    snake_case_ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    snake_case_ = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): four identically named flags -- only the last assignment
    # survives; presumably test_pruning / test_resize_embeddings /
    # test_head_masking / test_attention_outputs in the original.
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    def UpperCamelCase_ ( self : Any ):
        # Set up the model tester and config tester used by the suite.
        __A = MobileNetVaModelTester(self )
        __A = MobileNetVaConfigTester(self ,config_class=A ,has_text_modality=A )
    def UpperCamelCase_ ( self : str ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass
    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def UpperCamelCase_ ( self : Tuple ):
        pass
    @unittest.skip(reason="MobileNetV1 does not output attentions" )
    def UpperCamelCase_ ( self : Any ):
        pass
    def UpperCamelCase_ ( self : Optional[int] ):
        # Checks every model class's forward signature starts with `pixel_values`.
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = model_class(A )
            __A = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __A = [*signature.parameters.keys()]
            __A = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,A )
    def UpperCamelCase_ ( self : List[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : Optional[int] ):
        # Verifies hidden-state outputs: MobileNetV1 emits 26 feature maps.
        def check_hidden_states_output(A : List[Any] ,A : List[Any] ,A : Optional[int] ):
            __A = model_class(A )
            model.to(A )
            model.eval()
            with torch.no_grad():
                __A = model(**self._prepare_for_class(A ,A ) )
            __A = outputs.hidden_states
            __A = 26
            self.assertEqual(len(A ) ,A )
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __A = True
            check_hidden_states_output(A ,A ,A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __A = True
            check_hidden_states_output(A ,A ,A )
    def UpperCamelCase_ ( self : Tuple ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )
    @slow
    def UpperCamelCase_ ( self : Union[str, Any] ):
        # Smoke-test loading the first pretrained checkpoint from the archive list.
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = MobileNetVaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def UpperCAmelCase ( ):
    """Load the COCO fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: run the pretrained MobileNetV1 classifier on a
    COCO image and compare its logits against stored reference values.

    NOTE(review): machine-mangled block -- ``A`` is read as an undefined name
    where the device / intermediate tensors were presumably intended.
    '''
    @cached_property
    def UpperCamelCase_ ( self : List[str] ):
        # Image processor for the pretrained checkpoint (None without vision deps).
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
        )
    @slow
    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(A )
        __A = self.default_image_processor
        __A = prepare_img()
        __A = image_processor(images=A ,return_tensors="pt" ).to(A )
        # forward pass
        with torch.no_grad():
            __A = model(**A )
        # verify the logits
        __A = torch.Size((1, 10_01) )
        self.assertEqual(outputs.logits.shape ,A )
        __A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
"""simple docstring"""
from math import factorial
class a__ :
    """A dual number for forward-mode automatic differentiation.

    ``real`` holds the function value; ``duals`` holds the coefficients of the
    infinitesimal parts E^1, E^2, ...  Arithmetic on these numbers carries
    derivative information alongside the value.
    """

    def __init__( self , real , rank ):
        """``rank`` may be an int (number of dual parts, each initialised to 1)
        or an explicit list of dual coefficients."""
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__( self ):
        # e.g. Dual(2, [1, 0]) -> "2+1E1+0E2"
        return (
            f"""{self.real}+"""
            f"""{"+".join(str(dual )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )

    def __magic_name__( self ):
        """Return a copy with trailing zero dual coefficients dropped."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return a__(self.real , cur )

    def __add__( self , other ):
        if not isinstance(other , a__ ):
            # Scalar addition only shifts the real part.
            return a__(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list (with 1s, matching the rank default).
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return a__(self.real + other.real , new_duals )

    __radd__ = __add__

    def __sub__( self , other ):
        return self + other * -1

    def __mul__( self , other ):
        if not isinstance(other , a__ ):
            # Scalar multiplication scales every coefficient.
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return a__(self.real * other , new_duals )
        # Dual*dual: cross terms of the coefficient lists land one slot higher,
        # and each list is also scaled by the other number's real part.
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return a__(self.real * other.real , new_duals )

    __rmul__ = __mul__

    def __truediv__( self , other ):
        if not isinstance(other , a__ ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return a__(self.real / other , new_duals )
        # Division by another dual number is not supported.
        raise ValueError

    def __floordiv__( self , other ):
        if not isinstance(other , a__ ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return a__(self.real // other , new_duals )
        raise ValueError

    def __pow__( self , n ):
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def __magic_name__ ( func , position , order ):
    """Differentiate *func* at *position* to the given *order* via dual numbers.

    Evaluates ``func`` on a rank-1 dual number seeded at ``position``; the
    k-th derivative is the k-th dual coefficient scaled by k!.

    Raises:
        ValueError: when ``func`` is not callable, ``position`` is not numeric,
            or ``order`` is not an int.
    """
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    seed = a__(position , 1 )
    result = func(seed )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
    import doctest

    doctest.testmod()


def __magic_name__ ( y ):
    """Example function for differentiation: y**2 * y**4 == y**6."""
    return y**2 * y**4


# NOTE(review): `differentiate` and `f` are not defined under these names in
# this (mangled) file, so this module-level call fails at import -- confirm
# the intended names before enabling it.
print(differentiate(f, 9, 2))
| 361 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase :
    '''Builds a tiny LayoutLMv3 config plus dummy multimodal inputs
    (text ids, bounding boxes, pixel values) for the model tests.

    NOTE(review): machine-mangled block -- every parameter is declared ``A``
    (duplicate names, a SyntaxError) and bodies assign to ``__A`` while later
    reading names that are never bound.  Comments record apparent intent only.
    '''
    def __init__( self : str ,A : int ,A : int=2 ,A : Optional[Any]=3 ,A : Dict=4 ,A : Optional[int]=2 ,A : Union[str, Any]=7 ,A : List[str]=True ,A : Union[str, Any]=True ,A : Optional[int]=True ,A : Optional[int]=True ,A : Tuple=99 ,A : Optional[int]=36 ,A : Dict=3 ,A : str=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : Dict=0.1 ,A : Union[str, Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Any=16 ,A : Union[str, Any]=2 ,A : List[Any]=0.02 ,A : List[Any]=6 ,A : Optional[int]=6 ,A : List[Any]=3 ,A : Union[str, Any]=4 ,A : Tuple=None ,A : List[str]=10_00 ,):
        # presumably: parent, batch_size, num_channels, image_size, patch_size,
        # text_seq_length, is_training, use_input_mask, use_token_type_ids,
        # use_labels, vocab_size, hidden_size, num_hidden_layers,
        # num_attention_heads, intermediate_size, hidden_act,
        # hidden_dropout_prob, attention_probs_dropout_prob,
        # max_position_embeddings, type_vocab_size, type_sequence_label_size,
        # initializer_range, coordinate_size, shape_size, num_labels,
        # num_choices, scope, range_bbox -- TODO confirm upstream.
        __A = parent
        __A = batch_size
        __A = num_channels
        __A = image_size
        __A = patch_size
        __A = text_seq_length
        __A = is_training
        __A = use_input_mask
        __A = use_token_type_ids
        __A = use_labels
        __A = vocab_size
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = max_position_embeddings
        __A = type_vocab_size
        __A = type_sequence_label_size
        __A = initializer_range
        __A = coordinate_size
        __A = shape_size
        __A = num_labels
        __A = num_choices
        __A = scope
        __A = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __A = text_seq_length
        __A = (image_size // patch_size) ** 2 + 1
        __A = self.text_seq_length + self.image_seq_length
    def UpperCamelCase_ ( self : int ):
        # Prepares config + (input_ids, bbox, pixel_values, token_type_ids,
        # attention_mask, sequence_labels, token_labels).
        __A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
        __A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __A = bbox[i, j, 3]
                    __A = bbox[i, j, 1]
                    __A = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __A = bbox[i, j, 2]
                    __A = bbox[i, j, 0]
                    __A = t
        __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __A = None
        if self.use_input_mask:
            __A = random_attention_mask([self.batch_size, self.text_seq_length] )
        __A = None
        if self.use_token_type_ids:
            __A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
        __A = None
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
        __A = LayoutLMvaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def UpperCamelCase_ ( self : Optional[int] ,A : List[str] ,A : Any ,A : Dict ,A : List[Any] ,A : Optional[int] ,A : Any ,A : Dict ,A : List[Any] ):
        # Exercises the bare model with text+image, text-only and image-only inputs
        # and checks the resulting hidden-state shapes.
        __A = LayoutLMvaModel(config=A )
        model.to(A )
        model.eval()
        # text + image
        __A = model(A ,pixel_values=A )
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A )
        __A = model(A ,bbox=A ,pixel_values=A ,token_type_ids=A )
        __A = model(A ,bbox=A ,pixel_values=A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __A = model(A )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __A = model(pixel_values=A )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
    def UpperCamelCase_ ( self : Optional[int] ,A : Dict ,A : List[str] ,A : Any ,A : List[Any] ,A : Any ,A : Any ,A : Dict ,A : Optional[Any] ):
        # Sequence-classification head: checks the logits shape.
        __A = self.num_labels
        __A = LayoutLMvaForSequenceClassification(A )
        model.to(A )
        model.eval()
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def UpperCamelCase_ ( self : str ,A : Optional[Any] ,A : Dict ,A : str ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ,A : Any ,A : Union[str, Any] ):
        # Token-classification head: checks the per-token logits shape.
        __A = self.num_labels
        __A = LayoutLMvaForTokenClassification(config=A )
        model.to(A )
        model.eval()
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
    def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : int ,A : str ,A : List[str] ,A : int ,A : List[str] ,A : List[str] ,A : Dict ):
        # Question-answering head: checks start/end logits shapes.
        __A = LayoutLMvaForQuestionAnswering(config=A )
        model.to(A )
        model.eval()
        __A = model(
            A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def UpperCamelCase_ ( self : str ):
        # Re-packs prepared inputs into the dict form the common tests expect.
        __A = self.prepare_config_and_inputs()
        (
            (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) ,
        ) = config_and_inputs
        __A = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Common-suite model tests for LayoutLMv3.

    NOTE(review): machine-mangled block -- ``A`` is read as an undefined name
    in many calls, the helper tester is referenced as ``LayoutLMvaModelTester``
    (not defined in this file), and the base class names look mangled.
    '''
    # NOTE(review): three identically named flags -- only the last survives;
    # presumably test_pruning / test_torchscript / test_mismatched_shapes.
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Tuple ,A : List[Any] ,A : Optional[Any] ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def UpperCamelCase_ ( self : Union[str, Any] ):
        # Set up the model tester and config tester used by the suite.
        __A = LayoutLMvaModelTester(self )
        __A = ConfigTester(self ,config_class=A ,hidden_size=37 )
    def UpperCamelCase_ ( self : List[Any] ,A : int ,A : List[str] ,A : Dict=False ):
        # Expands/creates dummy label tensors appropriate to each model class.
        __A = copy.deepcopy(A )
        if model_class in get_values(A ):
            __A = {
                k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
                if isinstance(A ,torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(A ):
                __A = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in get_values(A ):
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                __A = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                __A = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A ,)
        return inputs_dict
    def UpperCamelCase_ ( self : List[Any] ):
        self.config_tester.run_common_tests()
    def UpperCamelCase_ ( self : Union[str, Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : str ):
        # Re-runs the model test for each position-embedding type.
        __A = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __A = type
            self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A )
    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A )
    def UpperCamelCase_ ( self : str ):
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A )
    @slow
    def UpperCamelCase_ ( self : Optional[int] ):
        # Smoke-test loading the first pretrained checkpoint from the archive list.
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = LayoutLMvaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def UpperCAmelCase ( ):
    """Load the COCO fixture image used by the LayoutLMv3 integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: run the pretrained LayoutLMv3 base model on a
    fixture image with dummy text/bbox inputs and compare hidden states
    against stored reference values.

    NOTE(review): machine-mangled block -- ``A`` is read as an undefined name
    where the device / intermediate tensors were presumably intended.
    '''
    @cached_property
    def UpperCamelCase_ ( self : Any ):
        # OCR is disabled because the test supplies its own words/boxes.
        return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
    @slow
    def UpperCamelCase_ ( self : Dict ):
        __A = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(A )
        __A = self.default_image_processor
        __A = prepare_img()
        __A = image_processor(images=A ,return_tensors="pt" ).pixel_values.to(A )
        __A = torch.tensor([[1, 2]] )
        __A = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        __A = model(
            input_ids=input_ids.to(A ) ,bbox=bbox.to(A ) ,pixel_values=pixel_values.to(A ) ,)
        # verify the logits
        __A = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape ,A )
        __A = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __snake_case ( unittest.TestCase ):
    '''Tests for FlaxAutoModel: config/model loading for BERT and RoBERTa
    checkpoints, jitted forward passes, and error messages for invalid repos.

    NOTE(review): machine-mangled block -- every method shares the same name
    (only the last definition survives) and several bodies read the undefined
    name ``snake_case`` where the loop variable / loaded object was intended.
    '''
    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Loads BERT configs and Flax models via the Auto classes.'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(snake_case ):
                lowercase : Union[str, Any] = AutoConfig.from_pretrained(snake_case )
                self.assertIsNotNone(snake_case )
                self.assertIsInstance(snake_case ,snake_case )
                lowercase : List[str] = FlaxAutoModel.from_pretrained(snake_case )
                self.assertIsNotNone(snake_case )
                self.assertIsInstance(snake_case ,snake_case )
    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Loads RoBERTa configs and Flax models via the Auto classes.'''
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(snake_case ):
                lowercase : int = AutoConfig.from_pretrained(snake_case )
                self.assertIsNotNone(snake_case )
                self.assertIsInstance(snake_case ,snake_case )
                lowercase : int = FlaxAutoModel.from_pretrained(snake_case )
                self.assertIsNotNone(snake_case )
                self.assertIsInstance(snake_case ,snake_case )
    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Runs a jit-compiled forward pass with a BERT Flax model.'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            lowercase : Any = AutoTokenizer.from_pretrained(snake_case )
            lowercase : str = FlaxBertModel.from_pretrained(snake_case )
            lowercase : Any = tokenizer("""Do you support jax jitted function?""" ,return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**snake_case ):
                return model(**snake_case )
            eval(**snake_case ).block_until_ready()
    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Runs a jit-compiled forward pass with a RoBERTa Flax model.'''
        for model_name in ["roberta-base", "roberta-large"]:
            lowercase : Optional[int] = AutoTokenizer.from_pretrained(snake_case )
            lowercase : Tuple = FlaxRobertaModel.from_pretrained(snake_case )
            lowercase : str = tokenizer("""Do you support jax jitted function?""" ,return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**snake_case ):
                return model(**snake_case )
            eval(**snake_case ).block_until_ready()
    def _SCREAMING_SNAKE_CASE ( self ):
        '''An invalid model identifier raises a descriptive error.'''
        with self.assertRaisesRegex(
            snake_case ,"""bert-base is not a local folder and is not a valid model identifier""" ):
            lowercase : List[Any] = FlaxAutoModel.from_pretrained("""bert-base""" )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''An invalid revision raises a descriptive error.'''
        with self.assertRaisesRegex(
            snake_case ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            lowercase : List[str] = FlaxAutoModel.from_pretrained(snake_case ,revision="""aaaaaa""" )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''A repo without Flax weights raises a descriptive error.'''
        with self.assertRaisesRegex(
            snake_case ,"""hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" ,):
            lowercase : Optional[Any] = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''PyTorch-only weights suggest `from_pt=True` in the error message.'''
        with self.assertRaisesRegex(snake_case ,"""Use `from_pt=True` to load this model""" ):
            lowercase : Any = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 336 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
    """Holds the hyper-parameters used to build a BeitImageProcessor for tests.

    ``size``/``crop_size`` default to 20x20 and 18x18 when not supplied.
    """

    def __init__( self : Any , parent : List[str] , batch_size : str = 7 , num_channels : Optional[Any] = 3 , image_size : Any = 18 , min_resolution : int = 30 , max_resolution : int = 4_00 , do_resize : List[str] = True , size : Union[str, Any] = None , do_center_crop : Union[str, Any] = True , crop_size : Tuple = None , do_normalize : Tuple = True , image_mean : Union[str, Any] = [0.5, 0.5, 0.5] , image_std : str = [0.5, 0.5, 0.5] , do_reduce_labels : List[Any] = False , ):
        # Fall back to the default processor geometry when none is given.
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def UpperCamelCase_ ( self : List[str] ):
        """Return the kwargs dict used to construct a BeitImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def UpperCAmelCase ( ):
    """Load one ADE20k fixture image and its segmentation map."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image = Image.open(dataset[0]["file"] )
    seg_map = Image.open(dataset[1]["file"] )
    return image, seg_map
def UpperCAmelCase ( ):
    """Load two ADE20k fixture images and their segmentation maps.

    Returns ([image_a, image_b], [map_a, map_b]).
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image_a = Image.open(ds[0]["file"] )
    image_b = Image.open(ds[1]["file"] )
    map_a = Image.open(ds[2]["file"] )
    map_b = Image.open(ds[3]["file"] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for ``BeitImageProcessor``.

    NOTE(review): the base class name ``__SCREAMING_SNAKE_CASE`` is undefined in
    this file; upstream this is the image-processing test mixin imported at the
    top of the file — confirm the intended base. Every method in the mangled
    original shared the single name ``UpperCamelCase_`` (so only the last one
    survived class creation); the conventional test names are restored, together
    with the local variable names the assertions actually read.
    """

    # The processor class under test (None when vision deps are missing).
    # The mixin and the methods below read ``self.image_processing_class``.
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        """Create the helper that supplies processor kwargs and input shapes."""
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all expected configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        """``from_dict`` honours both the stored dict and keyword overrides."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        """PIL inputs come out resized/cropped to ``crop_size``, single or batched."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        """NumPy inputs behave the same as PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        """PyTorch tensor inputs behave the same as PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        """Segmentation maps are cropped with the images and returned as long tensors."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors and matching all-zero segmentation maps
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        """``do_reduce_labels`` remaps the 151-value ADE20k label range into 0..255."""
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 55 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Fail fast on fairseq versions the conversion code was not written for.
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
# NOTE(review): these module constants were all mangled to the shared name
# ``__snake_case``; the conversion function below reads ``SAMPLE_TEXT`` and
# ``SAMPLE_LANGUAGE``, so the upstream names are restored.
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Convert a fairseq X-MOD checkpoint to a Hugging Face Xmod model and save it.

    Args:
        xmod_checkpoint_path: Path to the official fairseq checkpoint file.
        pytorch_dump_folder_path: Output directory for the converted model.
        classification_head: Whether to also convert the "mnli" classification head.

    Raises:
        AssertionError: If any weight shapes or adapter language lists disagree.
        Exception: If the converted model's outputs do not match fairseq's.

    NOTE(review): the mangled original named all three parameters ``A_`` (a
    SyntaxError) and lost every assignment target; names and copy targets are
    reconstructed from the upstream Hugging Face conversion script — verify
    against the repository before relying on this.
    """
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("""Do both models output the same tensors?""", """🔥""" if success else """💩""")
    if not success:
        raise Exception("""Something went wRoNg""")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 577 |
from numpy import exp, pi, sqrt
def UpperCAmelCase(x, mu=0.0, sigma=1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at ``x``.

    Args:
        x: Point at which to evaluate the density.
        mu: Mean of the distribution. Defaults to 0.0.
        sigma: Standard deviation; must be non-zero. Defaults to 1.0.

    Returns:
        ``1 / sqrt(2*pi*sigma**2) * exp(-(x - mu)**2 / (2*sigma**2))``.
    """
    # The mangled signature declared all three parameters as ``a_`` (a
    # SyntaxError) and a wrong ``-> int`` annotation; the body already read
    # ``x``, ``mu`` and ``sigma``, so those names are restored.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 | 0 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch

# NOTE(review): mangled to ``__snake_case`` in this copy; the classes below
# call ``logger.warning(...)``, so the conventional name is restored.
logger = logging.get_logger(__name__)
class UpperCamelCase :
    """
    Container for one conversation and its history: the texts the user already
    sent (``past_user_inputs``), the model replies (``generated_responses``),
    and the not-yet-processed ``new_user_input``.

    NOTE(review): upstream this class is named ``Conversation``; the mangled
    copy named every method ``A__`` (so only the last survived) and stored
    nothing on ``self`` — both defects are fixed below.
    """

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        # A fresh UUID identifies the conversation when the caller gives none.
        if not conversation_id:
            conversation_id = uuid.uuid4()  # was the typo ``uuid.uuida()``
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        # Compare against ``self.__class__`` rather than the module-level class
        # name, which a later definition in this (mangled) file shadows.
        if not isinstance(other, self.__class__):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue ``text`` as the next user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    F'''with: "{text}".''' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the history and clear it."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record a model reply."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs in conversation order."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = F'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
            output += F'''{name} >> {text} \n'''
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ",
)
class UpperCamelCase ( Pipeline ):
    """
    Multi-turn conversational pipeline.

    NOTE(review): the decorator argument and the base class were mangled to the
    undefined ``__SCREAMING_SNAKE_CASE``; ``PIPELINE_INIT_ARGS`` and ``Pipeline``
    (imported from ``.base`` at the top of this module) are the intended names.
    The mangled copy also named every method ``A__``, which the ``Pipeline``
    framework cannot dispatch — the standard hook names are restored.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation needs a pad token; fall back to EOS when none is configured.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split caller kwargs into preprocess / forward / postprocess dicts."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run the pipeline; unwrap a single-element result list."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the conversation into framework-specific model inputs."""
        # NOTE(review): ``Conversation`` is the container class defined earlier
        # in this module (mangled to ``UpperCamelCase`` in this copy) — restore
        # its upstream name for this check to resolve.
        if not isinstance(conversation, Conversation):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''')
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer, '''_build_conversation_input_ids'''):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a reply, trimming the prompt so ``minimum_tokens`` remain free."""
        max_length = generate_kwargs.get('''max_length''', self.model.config.max_length)
        n = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''')
            trim = max_length - minimum_tokens
            model_inputs['''input_ids'''] = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['''attention_mask'''] = model_inputs['''attention_mask'''][:, -trim:]
        conversation = model_inputs.pop('''conversation''')
        generate_kwargs['''max_length'''] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        # Encoder-decoder outputs start with a decoder-start token; decoder-only
        # outputs echo the whole prompt, which must be stripped.
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and append the reply to the conversation."""
        output_ids = model_outputs['''output_ids''']
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Concatenate per-turn token ids (EOS-separated), truncated to model_max_length."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 571 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test for ``FlaxStableDiffusionInpaintPipeline``.

    NOTE(review): both methods were mangled to the shared name
    ``UpperCamelCase_``; ``tearDown`` must carry its unittest hook name (the
    body calls ``super().tearDown()``), and the test method must start with
    ``test_`` to be collected. Local names were reconstructed from the
    upstream diffusers test.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        # Run one sample per device.
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 55 | 0 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
# NOTE(review): these three constants were all mangled to the shared name
# ``lowercase`` (so only the last assignment survived) and the middle string
# literal was split across physical lines, a SyntaxError. Restored to the
# names the decorator and class below actually read.
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.

        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    """F1 metric: thin wrapper around scikit-learn's ``f1_score``."""

    def _info(self):
        # ``datasets.Metric`` dispatches to ``_info``; the mangled copy named
        # every method ``a_``, which the framework cannot find.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        # NOTE(review): this module imports ``fa_score`` from sklearn.metrics,
        # which does not exist upstream — the import line should read
        # ``f1_score``. Upstream passes (references, predictions) in that order.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 211 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester ( unittest.TestCase ):
    """Holds the configuration used to build ``GLPNImageProcessor`` in the tests.

    NOTE(review): renamed from the mangled ``UpperCAmelCase`` — the test class
    below instantiates ``GLPNImageProcessingTester(self)``. The original
    ``__init__`` declared duplicate parameter names (a SyntaxError) and assigned
    every value to a throwaway local instead of storing it on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate a ``GLPNImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for ``GLPNImageProcessor``.

    NOTE(review): the base class name ``__SCREAMING_SNAKE_CASE`` is undefined
    here; upstream this is the image-processing test mixin imported at the top
    of the file — confirm the intended base. Method and local names were
    restored from the mangled ``UpperCamelCase_``/``__A`` placeholders.
    """

    # The processor class under test (None when vision deps are missing).
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all expected configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        """PIL inputs are resized so both spatial dims are multiples of size_divisor."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        """NumPy inputs behave the same as PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        """PyTorch tensor inputs behave the same as PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 55 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger, used by the config class below (it calls ``logger.info``).
# The original bound both constants to the same name, clobbering the logger.
logger = logging.get_logger(__name__)

# Released checkpoints mapped to their hosted config files.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE):
    """Configuration class for the Deformable DETR model.

    Stores the backbone selection, encoder/decoder sizes, deformable-attention
    settings and the Hungarian-matcher / loss coefficients. Defaults follow the
    SenseTime/deformable-detr checkpoint.

    Original defects fixed: ``model_type`` and ``attribute_map`` were both
    bound to the same name (the string was lost); ``__init__`` declared every
    parameter with the same name (a duplicate-argument SyntaxError); the two
    properties and ``to_dict`` shared one name, so only the last survived.
    """

    # Read by ``PretrainedConfig`` machinery.
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        # The two backbone mechanisms are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a dict config into the proper config class.
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias declared in ``attribute_map``.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias declared in ``attribute_map``.
        return self.d_model

    def to_dict(self):
        """Serialize this config (and any nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 545 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Task template for image-classification datasets.

    Maps an image column and a ``ClassLabel`` column onto the canonical
    ``image``/``labels`` schema.

    Original defects fixed: all five class attributes and both methods shared
    one obfuscated name (later bindings shadowed earlier ones) and
    ``align_with_features`` returned an undefined local.
    """

    # `field(...)` is required for the metadata on a frozen dataclass.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema carries the
        dataset's actual ``ClassLabel`` (with its class names)."""
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ to bypass __setattr__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Dataset column name -> canonical template column name.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 55 | 0 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __a :
    """Shared helpers for DeepFloyd-IF pipeline tests.

    Provides tiny, seeded dummy components for the stage-1 and super-resolution
    pipelines plus save/load round-trip checks. Concrete test classes supply
    ``pipeline_class``, ``get_dummy_components`` and ``get_dummy_inputs``.

    Original defect fixed: all four methods were defined under one shared name,
    so only the last survived; bodies also referenced an undefined name.
    """

    def _get_dummy_components(self):
        """Build a minimal stage-1 IF component dict (all seeded for determinism)."""
        torch.manual_seed(0)
        text_encoder = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ],
            mid_block_type='UNetMidBlock2DSimpleCrossAttn',
            up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type='text',
            addition_embed_type_num_heads=2,
            cross_attention_norm='group_norm',
            resnet_time_scale_shift='scale_shift',
            act_fn='gelu',
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        # NOTE(review): `thresholding=False` restores the standard dummy-scheduler
        # setup; the obfuscated original referenced an undefined name here.
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00,
            beta_schedule='squaredcos_cap_v2',
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=False,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type='epsilon',
            variance_type='learned_range',
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        """Build a minimal super-resolution IF component dict (seeded)."""
        torch.manual_seed(0)
        text_encoder = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        # 6 input channels: the upscaler concatenates the low-res image.
        unet = UNetaDConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ],
            mid_block_type='UNetMidBlock2DSimpleCrossAttn',
            up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type='text',
            addition_embed_type_num_heads=2,
            cross_attention_norm='group_norm',
            resnet_time_scale_shift='scale_shift',
            act_fn='gelu',
            class_embed_type='timestep',
            mid_block_scale_factor=1.414,
            time_embedding_act_fn='gelu',
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00,
            beta_schedule='squaredcos_cap_v2',
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=False,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type='epsilon',
            variance_type='learned_range',
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=10_00,
            beta_schedule='squaredcos_cap_v2',
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        """Round-trip save/load with every optional component set to None and
        prompts pre-encoded; output must match to within 1e-4."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']

        # Some IF variants take extra image inputs; carry them through if present.
        if "image" in inputs:
            image = inputs['image']
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                F'`{optional_component}` did not stay set to None after loading.',
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']

        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        """Plain save/load round trip; output must match to within 1e-4."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 650 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, by trial division up to sqrt(number).

    Restored name: sibling functions in this file call ``is_prime`` by name.
    """
    # precondition
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n: int) -> list:
    """Return all primes from 2 to `n` using the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 to `n`, testing each candidate with is_prime().

    Restored name: ``goldbach`` below calls ``get_prime_numbers`` by name.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of `number` as an ascending list.

    0 and 1 yield themselves; a prime yields ``[number]``.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # exact integer division: keeps 'quotient' an int and avoids
                # float rounding on large inputs (original used '/=')
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number: int) -> bool:
    """Return True if `number` is even.

    Restored name: ``goldbach`` below calls ``is_even`` by name.
    """
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True if `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return a pair of primes summing to the even `number` (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor via the Euclidean algorithm.

    Restored name and parameters: the original declared both parameters with
    the same name (a SyntaxError), and ``simplify_fraction`` calls ``gcd``.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple ('kgV') of the two positive numbers.

    Computed by merging prime factorizations with the maximum multiplicity.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number, 0-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return every prime strictly between two given primes."""
    # precondition
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of `n`, including 1 and `n` itself.

    Restored name: ``is_perfect_number`` below calls ``get_divisors`` by name.
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors (e.g. 6, 28)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return (numerator, denominator) reduced by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! computed iteratively (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
| 55 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(
    __SCREAMING_SNAKE_CASE , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Masked-language-modeling pipeline: fills `[MASK]` tokens in the input.

    Original defects fixed: every method was defined under one shared name
    (so only the last survived) and several signatures declared duplicate
    parameter names (a SyntaxError). Restored names are grounded by the
    internal calls (``self.get_masked_index``, ``self._ensure_exactly_one_mask_token``,
    ``self.get_target_ids``) still present in the bodies.
    """

    def get_masked_index(self, input_ids: GenericTensor):
        """Return the positions of the tokenizer's mask token in ``input_ids``."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        """Raise a PipelineException when ``input_ids`` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f'No mask_token ({self.tokenizer.mask_token}) found on the input',
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate the mask-token presence for a batch or a single encoding."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        """Tokenize the raw text and check it actually contains a mask token."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        # Keep the input_ids alongside the logits for postprocessing.
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into the top-k (score, token, token_str, sequence) rows."""
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Resolve target words to vocab ids, tokenizing out-of-vocab targets."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.'
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split call-time kwargs into (preprocess, forward, postprocess) params."""
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Run the pipeline; unwrap the result for a single-element list input."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
| 627 |
import os
def solution():
    """Solve Project Euler's maximum-path-sum problem over ``triangle.txt``.

    Reads the triangle file next to this script, runs a bottom-up dynamic
    program adding the best reachable parent to each cell, and returns the
    maximum total in the last row.

    Restored defects: the function was renamed away from ``solution`` (the
    ``__main__`` guard below calls ``solution()``) and ``__file__`` had been
    replaced with an undefined name.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # parents: directly above (if it exists) and above-left (if j > 0)
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| 55 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__snake_case = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] = "mumbai" ):
lowercase__ : Dict = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
lowercase__ : int = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
lowercase__ : Dict = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F"Job {i:>2} is {job[0]} at {job[1]}")
| 200 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE :Union[str, Any] = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE :List[str] = object()
def _match(qs, ks):
    """Return True when the pattern sequence *qs* matches a contiguous
    sub-sequence of the key tuple *ks*.

    Each entry of *qs* is a regex source string; it is anchored with ``$`` so a
    pattern must match a whole key segment. Renamed from the garbled
    ``UpperCAmelCase`` to the name the call site in ``_replacement_rules`` uses.
    """
    patterns = tuple(re.compile(pat + "$") for pat in qs)
    # slide the pattern window over every alignment inside ks
    for start in range(len(ks) - len(qs) + 1):
        # fixed: the original matched/zipped the undefined name `a_`
        matches = [pat.match(key) for pat, key in zip(patterns, ks[start:])]
        if matches and all(matches):
            return True
    return False
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
def replace(a_ , a_ ):
for rule, replacement in rules:
if _match(a_ , a_ ):
return replacement
return val
return replace
def _get_partition_rules():
    """Partition rules for GPT-2 style parameter paths.

    Maps flattened parameter-name tuples to PartitionSpecs: ``"mp"`` marks the
    model-parallel axis, ``None`` replicates. Renamed from the garbled
    ``UpperCAmelCase`` to the name ``set_partitions`` calls; the undefined
    ``a_`` placeholders are restored to ``None``.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every leaf of the (nested) param dict *in_dict*.

    Every flattened key starts as the ``_unmatched`` sentinel and must be
    claimed by some rule from ``_get_partition_rules``; the assert guarantees
    no parameter is left without a spec. Returns a frozen nested dict.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # seed every flattened key with the sentinel, then let the rules claim them
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 55 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs
) -> None:
    """Compute per-example token lengths for the train/val splits and pickle them.

    NOTE(review): the original signature repeated one parameter name five times
    (a SyntaxError) and the body referenced the undefined placeholder ``a_``;
    parameter names are reconstructed from how the values are used.
    For each batch, length = number of non-pad tokens; when *consider_target*
    is True the max of source/target lengths is kept instead of just source.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # one pass over the dataset; desc shows which len_file is being built
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    # fire already expected the canonical name `save_len_file`
    fire.Fire(save_len_file)
| 60 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds a tiny DeiT config plus dummy inputs and runs shape checks.

    NOTE(review): the original class/parameter/method names were machine-mangled
    (duplicate ``A`` parameters are a SyntaxError; every method was named
    ``UpperCamelCase_`` so later defs shadowed earlier ones). Names are
    reconstructed from the identifiers the bodies reference — e.g. the test
    class below instantiates ``TFDeiTModelTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2
        # (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model-test suite specialised for DeiT.

    NOTE(review): the original base classes were the undefined name
    ``__SCREAMING_SNAKE_CASE`` and every method was named ``UpperCamelCase_``;
    bases and method names are reconstructed from the file's imports and from
    the helpers each body calls.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # models without a `labels` kwarg (e.g. the teacher variant) must not receive one
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test image used by the integration test.

    Fixed: the original bound the opened image to a throwaway name and then
    returned the unbound name ``image`` (NameError); it also carried a wrong
    ``-> str`` annotation and was not named ``prepare_img`` as its caller expects.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of the distilled DeiT checkpoint on a real image.

    NOTE(review): class/method names reconstructed — the body reads
    ``self.default_image_processor``, so the cached property must carry that name.
    """

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 55 | 0 |
"""simple docstring"""
__UpperCAmelCase = {str(digit): digit**5 for digit in range(10)}
def lowercase__ ( lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a_ ) )
def lowercase__ ( ) -> int:
'''simple docstring'''
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(a_ ) )
if __name__ == "__main__":
print(solution())
| 642 |
# Per-month doomsday anchors for leap / non-leap years (index 0 = January).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def UpperCAmelCase(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date via Conway's doomsday rule.

    Fixes: the original signature repeated the parameter name three times (a
    SyntaxError), the constants and every local were bound under garbled names,
    and the leap test used ``year % 400 == 0`` — which sent leap century years
    (e.g. 2000) to the non-leap table and produced the wrong weekday.
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Non-leap: not divisible by 4, or a century year not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 361 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase ( a_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__A = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
__A = F'''{olid} is not a valid Open Library olid'''
raise ValueError(a_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def summarize_book(ol_book_data: dict) -> dict:
    """Project a raw Open Library book record onto human-readable keys.

    Renamed from the garbled ``UpperCAmelCase`` to the name the interactive
    loop calls; local bindings (``ol_book_data``, ``data``, the ``isinstance``
    arguments) are restored from the keys the body reads.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Resolve author references to display names (one extra API call each).
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.