| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
import json
import logging
import os
import socket

import git
import numpy as np
import torch

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info of the current repo to `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()
    logger.info("Initializing GPUs")

    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed everywhere."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
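
# A minimal sketch of how these helpers fit together (SimpleNamespace stands in
# for the script's parsed arguments; attribute names follow the code above):
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(n_gpu=0, local_rank=-1, seed=42)
#   init_gpu_params(params)   # CPU-only path: is_master=True, multi_gpu=False
#   set_seed(params)
#   git_log("./dumps/run1")   # writes ./dumps/run1/git_log.json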
| code_codestyle: 313 |
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb

from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """
    Wraps the CLIP tokenizer and image preprocessing (resize, center-crop,
    normalize) so that gradients can flow through the image pipeline.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the saved intermediate images into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
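
    # Sketch of the prompt syntax handled by process_prompts (values assumed
    # for illustration): the string "a smiling face:2|blurry:-1" becomes
    # {"prompts": ["a smiling face", "blurry"], "weights": tensor([2., -1.])};
    # a list of (prompt, weight) tuples is accepted as well.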
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image edit driven by the positive/negative prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| style_context_codestyle: 313 | label: 1 |
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
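
# A quick correctness check against a hand-computed product (4x3 times 3x4);
# the padding to a 4x4 power-of-two size happens internally:
# >>> strassen([[2, 1, 3], [3, 4, 6], [1, 4, 2], [7, 6, 7]],
# ...          [[4, 2, 3, 4], [2, 1, 1, 1], [8, 6, 4, 2]])
# [[34, 23, 19, 15], [68, 46, 37, 28], [28, 18, 15, 12], [96, 62, 55, 48]]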
| code_codestyle: 715 |
def solution(n: int = 10) -> str:
    """Returns the last n digits of the massive number 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
| style_context_codestyle: 314 | label: 0 |
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # fill a character buffer large enough to yield full sequences
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
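
# A minimal sketch of driving the dataset wrapper (the dataset file and the
# tokenizer checkpoint here are placeholders, not the script's defaults):
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   stream = load_dataset("json", data_files="data.jsonl", split="train", streaming=True)
#   for batch in ConstantLengthDataset(tokenizer, stream, seq_length=128):
#       ...  # each item is a torch tensor of exactly seq_length token ids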
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| code_codestyle: 623 |
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "โIntroductory", "Remarksโ", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
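        expected_boxes = lowerCamelCase  # and a handle on the expected boxes defined directly above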
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| style_context_codestyle: 623 | label: 1 |
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector: pull it toward the sample by a factor of alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
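
# One update step, for intuition: with alpha = 0.5, a weight of 0.2 pulled
# toward a sample value of 1 moves halfway there: 0.2 + 0.5 * (1 - 0.2) = 0.6.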
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| code_codestyle: 713 |
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit."""
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
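
# Worked example (assumed values): V = 100 V at 50 degrees, I = 5 A at -30
# degrees. The product above has magnitude 100 * 5 = 500 VA at an angle of
# 50 + (-30) = 20 degrees, i.e. 500*cos(20) + j*500*sin(20):
# >>> apparent_power(100, 5, 50, -30)    # approximately (469.85+171.01j)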
if __name__ == "__main__":
import doctest
doctest.testmod() | 381 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = embedding_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ (self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MobileBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MobileBertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , next_sentence_label=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MobileBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = MobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
UpperCamelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MobileBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase_ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def _long_tensor(tok_lst):
    """Build a torch.long tensor on the active test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
[
[
[-2.4736526E07, 8.2691656E04, 1.6521838E05],
[-5.7541704E-01, 3.9056022E00, 4.4011507E00],
[2.6047359E00, 1.5677652E00, -1.7324188E-01],
]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| code_codestyle: 513 |
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus current allocations."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claims minus allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Track the original index of each need vector."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm and report whether the state is safe."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
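
# Usage sketch with the module's test tables (any truthy keyword such as
# describe=True triggers the pretty-printed tables before the simulation):
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)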
if __name__ == "__main__":
import doctest
doctest.testmod()
| style_context_codestyle: 513 | label: 1 |
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Print the first-order entropy, the second-order entropy and the
    difference between the two for the given text.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts.
    The first dictionary stores the frequency of single character strings.
    The second dictionary stores the frequency of two character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
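
# Quick check of the counting convention (doctest-style):
# >>> single, double = analyze_text("hello world")
# >>> single["l"], double["lo"]
# (3, 1)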
def __lowerCAmelCase ()-> str:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 721 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| style_context_codestyle: 531 | label: 0 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into rectangular (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium (net moment ~ 0)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
__UpperCamelCase = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
__UpperCamelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__UpperCamelCase = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
__UpperCamelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__UpperCamelCase = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
__UpperCamelCase = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| code_codestyle: 551 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Return 1 unless both inputs are 1 (logical NAND)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
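# NAND is the negation of AND (De Morgan); the tuple-count trick above returns 0
# only when neither input is 0. Quick equivalence check:
#
#   all(nand_gate(a, b) == int(not (a and b)) for a in (0, 1) for b in (0, 1))   # True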
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Primality test exploiting that all primes > 3 have the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the spiral side length at which the diagonal prime ratio first drops below ``ratio``."""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
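# Illustrative usage (this matches Project Euler problem 58, the ratio of primes
# along the diagonals of a number spiral):
#
#   is_prime(13)    # True
#   is_prime(25)    # False (5 * 5)
#   solution(0.5)   # 11 -- first side length where the diagonal prime ratio drops below 50%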
if __name__ == "__main__":
import doctest
    doctest.testmod()


"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: largest sum of any contiguous subarray of ``arr``."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
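# Kadane's algorithm: O(n) time, O(1) extra space. Illustrative calls:
#
#   max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])        # 6, from the slice [4, -1, 2, 1]
#   max_subarray_sum([-1, -2], allow_empty_subarrays=True)   # 0, the empty subarray wins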
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums: list[int] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn ``["--foo", "bar", ...]`` into ``{"foo": "bar", ...}``."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
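# Illustrative invocations once installed as the `datasets-cli` entry point
# (exact flags depend on the installed `datasets` version):
#
#   $ datasets-cli env
#   $ datasets-cli test ./my_dataset --save_infos --all_configs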
if __name__ == "__main__":
main()
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Compute the date of Easter for ``year`` with Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
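# Sanity check (illustrative): Easter 2000 fell on April 23.
#
#   gauss_easter(2000)   # datetime.datetime(2000, 4, 23, 0, 0)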
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
__lowerCAmelCase : List[str] = "will be" if year > datetime.now().year else "was"
print(F"Easter in {year} {tense} {gauss_easter(year)}")
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Rescale pixel values around the midpoint 128 by a level in [-255, 255]."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
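# Minimal usage sketch needing no file on disk (assumes only Pillow):
#
#   demo = Image.new("RGB", (2, 2), (100, 150, 200))
#   change_contrast(demo, 170)   # returns a new, contrast-stretched image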
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find any `open(...)` call that does not pass an `encoding=` kwarg (binary modes excepted)."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Find any bare `print(...)` call, ignoring prints inside comments and strings."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
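# Minimal usage sketch (class name as fixed above; output shape follows from the
# default shortest_edge=224 resize plus the 224x224 center crop):
#
#   from PIL import Image
#   processor = LevitImageProcessor()
#   inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#   inputs["pixel_values"].shape   # torch.Size([1, 3, 224, 224])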
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
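# Funnel assigns token_type_id 2 to the leading <cls> token (BERT uses 0), which
# is exactly what the token_type_ids assertions above encode.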
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
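# This is the classic "escape time" rendering: `distance` is the normalized
# iteration count at which the orbit of c = x + yi escapes |z| > 2 (checked as
# a*a + b*b > 4); points that never escape get distance 1 and are drawn black.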
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
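# `_LazyModule` (used below) defers the heavy torch imports until an attribute
# is first accessed, so `import transformers` stays fast; `_import_structure`
# is the name-to-submodule map it resolves against.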
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    """Build a MobileNetV1Config whose fields match the TF checkpoint name."""
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    """Load the test image (cute cats) used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the TF checkpoint's weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the ๐ค hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
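# Illustrative invocation (the script filename is a placeholder; checkpoint names
# follow the original TensorFlow releases):
#
#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf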
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (one per UTF-8 byte)."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
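# Byte-level round trip (illustrative): every UTF-8 byte maps to one id, offset
# by the three special tokens (pad=0, eos=1, unk=2):
#
#   tok = ByT5Tokenizer()
#   tok.convert_tokens_to_ids(tok._tokenize("hi"))   # [107, 108] == [ord("h") + 3, ord("i") + 3]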
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
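# Illustrative instantiation (the defaults above appear to describe the
# EfficientFormer-L1 architecture of the checkpoint in the archive map):
#
#   config = EfficientFormerConfig()
#   config.model_type          # "efficientformer"
#   len(config.hidden_sizes)   # 4 stages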
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV URL via downloadgram and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f' "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'fnet'
def __init__( self , lowercase=32_000 , lowercase=768 , lowercase=12 , lowercase=3_072 , lowercase="gelu_new" , lowercase=0.1 , lowercase=512 , lowercase=4 , lowercase=0.02 , lowercase=1e-12 , lowercase=False , lowercase=512 , lowercase=3 , lowercase=1 , lowercase=2 , **lowercase , ) -> int:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = type_vocab_size
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_tpu_fourier_optimizations
lowerCAmelCase = tpu_short_seq_length
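# Minimal usage sketch (assumes this class is exposed as `transformers.FNetConfig`;
# the values shown are the defaults defined above):
#
#   config = FNetConfig()
#   assert config.hidden_size == 768 and config.num_hidden_layers == 12
#   config = FNetConfig(vocab_size=50_000, use_tpu_fourier_optimizations=True)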
| 532 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
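# Worked example of the shortest-edge logic above (numbers invented for
# illustration): with size["shortest_edge"] = 18 and a PIL image of
# w, h = 640, 480 the `w > h` branch applies, so
#   expected_height = 18
#   expected_width  = int(18 * 640 / 480) = 24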
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
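# For reference, the detection `target` fed to the processor above is a plain
# dict; a minimal hand-built example (all values invented, boxes in COCO
# x, y, w, h pixel format) would look like:
#
#   target = {
#       "image_id": 1,
#       "annotations": [
#           {"bbox": [10, 20, 30, 40], "category_id": 17, "area": 1200, "iscrowd": 0}
#       ],
#   }
#   encoding = DetaImageProcessor()(images=image, annotations=target, return_tensors="pt")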
| 711 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
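# Illustrative sketch of how these arguments are consumed (the benchmark
# classes named here exist in transformers but are deprecated alongside this
# dataclass):
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   results = PyTorchBenchmark(args).run()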
| 245 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1

        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
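# Shape sketch (assuming a standard CLIP vision config with 224x224 inputs):
# pixel_values (batch, 3, 224, 224) -> pooled CLIP features (batch, hidden)
# -> mapper / layer norm / projection -> (batch, 1, proj_size), i.e. one
# conditioning token per example image, plus the learned `uncond_vector` used
# for classifier-free guidance:
#
#   cond, uncond = encoder(pixel_values, return_uncond_vector=True)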
| 262 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
__UpperCAmelCase = []
for ex_index, example in enumerate(__a ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d of %d''' , __a , len(__a ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
for word, label in zip(example.words , example.labels ):
__UpperCAmelCase = tokenizer.tokenize(__a )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(__a ) > 0:
tokens.extend(__a )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__a ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__UpperCAmelCase = tokenizer.num_special_tokens_to_add()
if len(__a ) > max_seq_length - special_tokens_count:
__UpperCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
__UpperCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
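            # Worked example of the alignment above (tokens invented for
            # illustration): words ["Hugging", "Face"] with labels
            # ["B-ORG", "I-ORG"] may tokenize to ["Hug", "##ging", "Face"], giving
            # label_ids [label_map["B-ORG"], pad_token_label_id, label_map["I-ORG"]];
            # the [CLS] and [SEP] positions also receive pad_token_label_id (-100)
            # so they are ignored by the loss.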
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
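    # Illustrative usage sketch (assumes a TokenClassificationTask subclass,
    # e.g. an NER task that reads CoNLL-formatted files; names are placeholders):
    #
    #   dataset = TokenClassificationDataset(
    #       token_classification_task=ner_task, data_dir="data/", tokenizer=tokenizer,
    #       labels=labels, model_type="bert", max_seq_length=128, mode=Split.train,
    #   )
    #   features = dataset[0]  # an InputFeatures instance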
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 262 | 1 |
"""simple docstring"""
import requests
__a = "" # <-- Put your OpenWeatherMap appid here!
__a = "https://api.openweathermap.org/data/2.5/"
def A_ ( _lowercase = "Chicago", _lowercase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """weather""", params=locals() ).json()
def A_ ( _lowercase = "Kolkata, India", _lowercase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """forecast""", params=locals() ).json()
def A_ ( _lowercase = 55.68, _lowercase = 12.57, _lowercase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """onecall""", params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__a = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
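# Design note: `params=locals()` forwards each function's own arguments as the
# query string, so the call below (illustrative) issues
# GET https://api.openweathermap.org/data/2.5/weather?q=Chicago&appid=<key>
#
#   current_weather("Chicago")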
| 717 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
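    # Classifier-free guidance (applied in __call__ below) combines the two
    # halves of the batch built above as
    #   guided = uncond + scale * (text - uncond).
    # Tiny numeric sketch (made-up log-probabilities): with uncond = 0.1,
    # text = 0.3 and guidance_scale = 5.0, the guided prediction is
    # 0.1 + 5.0 * 0.2 = 1.1, which is then renormalized via logsumexp.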
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
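    # Worked example of truncate (values invented): for one column with sorted
    # probabilities [0.5, 0.3, 0.2] and truncation_rate = 0.7, the shifted
    # cumulative-sum mask keeps the top two entries [0.5, 0.3] (the largest is
    # always kept) and sets the 0.2 tail to log(0) = -inf, so it can never be
    # sampled in the scheduler step.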
| 310 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
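# Usage sketch (PipelineTool instances are callable; whether `audio` is a raw
# waveform array or a file path depends on the processor, so treat this as an
# assumption):
#
#   tool = SpeechToTextTool()
#   text = tool(audio)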
| 568 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.", model_bin_file_path, iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.", model_bin_file_path, iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
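# Illustrative invocation (paths are placeholders): one call drives the whole
# loop of pseudo-labeling, fine-tuning and early stopping defined above.
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="train.csv",
#       infer_file="unlabeled.csv",
#       output_dir="self_training_output",
#       evaluation_strategy="epoch",
#       eval_file="eval.csv",
#   )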
| 568 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
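# Worked example (numbers invented): for a 480x640 input, output_size 384,
# keep_aspect_ratio=True and multiple=32, the height scale 384/480 = 0.8 is
# closer to 1 than the width scale 384/640 = 0.6, so both dimensions use 0.8:
#   new_height = round(0.8 * 480 / 32) * 32 = 384
#   new_width  = round(0.8 * 640 / 32) * 32 = 512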
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def snake_case__ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Any = size if size is not None else self.size
_UpperCAmelCase : str = get_size_dict(_A)
_UpperCAmelCase : str = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCAmelCase : Tuple = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCAmelCase : int = resample if resample is not None else self.resample
_UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
_UpperCAmelCase : int = make_list_of_images(_A)
if not valid_images(_A):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
_UpperCAmelCase : List[str] = [to_numpy_array(_A) for image in images]
if do_resize:
_UpperCAmelCase : Dict = [self.resize(image=_A , size=_A , resample=_A) for image in images]
if do_rescale:
_UpperCAmelCase : Tuple = [self.rescale(image=_A , scale=_A) for image in images]
if do_normalize:
_UpperCAmelCase : int = [self.normalize(image=_A , mean=_A , std=_A) for image in images]
_UpperCAmelCase : Tuple = [to_channel_dimension_format(_A , _A) for image in images]
_UpperCAmelCase : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A)
def snake_case__ ( self , _A , _A = None) -> str:
"""simple docstring"""
_UpperCAmelCase : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A) != len(_A):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
if is_torch_tensor(_A):
_UpperCAmelCase : int = target_sizes.numpy()
_UpperCAmelCase : List[str] = []
for idx in range(len(_A)):
_UpperCAmelCase : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A)
_UpperCAmelCase : Any = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(_A)
else:
_UpperCAmelCase : Tuple = logits.argmax(dim=1)
_UpperCAmelCase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
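# Usage sketch (added; names are assumptions — upstream this method is called
# post_process_semantic_segmentation): given model `outputs` with per-pixel class
# logits and one (height, width) tuple per image, it bilinearly upsamples the
# logits to each target size and argmaxes over the class dimension, e.g.
#
#     seg = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[(480, 640)])
#     seg[0].shape  # -> torch.Size([480, 640])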
| 186 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict = StableDiffusionXLImgaImgPipeline
_SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
_SCREAMING_SNAKE_CASE : List[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
_SCREAMING_SNAKE_CASE : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self) -> Tuple:
"""simple docstring"""
torch.manual_seed(0)
_UpperCAmelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=_A , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_UpperCAmelCase : int = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0)
_UpperCAmelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
_UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
_UpperCAmelCase : int = CLIPTextModel(_A)
_UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_A)
_UpperCAmelCase : int = CLIPTextModelWithProjection(_A)
_UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_A)
_UpperCAmelCase : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
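    # Note (added): the deliberately tiny UNet/VAE/CLIP configs above keep this
    # pipeline test fast; the dual text encoder + tokenizer pair mirrors SDXL,
    # which conditions on two CLIP text encoders.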
def snake_case__ ( self , _A , _A=0) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A)).to(_A)
_UpperCAmelCase : List[Any] = image / 2 + 0.5
if str(_A).startswith('''mps'''):
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(_A)
else:
_UpperCAmelCase : List[str] = torch.Generator(device=_A).manual_seed(_A)
_UpperCAmelCase : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def snake_case__ ( self) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : Any = StableDiffusionXLImgaImgPipeline(**_A)
_UpperCAmelCase : List[str] = sd_pipe.to(_A)
sd_pipe.set_progress_bar_config(disable=_A)
_UpperCAmelCase : Tuple = self.get_dummy_inputs(_A)
_UpperCAmelCase : Any = sd_pipe(**_A).images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : Optional[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : str = StableDiffusionXLImgaImgPipeline(**_A)
_UpperCAmelCase : Any = sd_pipe.to(_A)
_UpperCAmelCase : Tuple = sd_pipe.to(_A)
sd_pipe.set_progress_bar_config(disable=_A)
# forward without prompt embeds
_UpperCAmelCase : str = self.get_dummy_inputs(_A)
_UpperCAmelCase : Optional[Any] = 3 * ['''this is a negative prompt''']
_UpperCAmelCase : Optional[int] = negative_prompt
_UpperCAmelCase : Optional[int] = 3 * [inputs['''prompt''']]
_UpperCAmelCase : Optional[Any] = sd_pipe(**_A)
_UpperCAmelCase : Any = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_UpperCAmelCase : Dict = self.get_dummy_inputs(_A)
_UpperCAmelCase : Optional[Any] = 3 * ['''this is a negative prompt''']
_UpperCAmelCase : Dict = 3 * [inputs.pop('''prompt''')]
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = sd_pipe.encode_prompt(_A , negative_prompt=_A)
_UpperCAmelCase : str = sd_pipe(
**_A , prompt_embeds=_A , negative_prompt_embeds=_A , pooled_prompt_embeds=_A , negative_pooled_prompt_embeds=_A , )
        _UpperCAmelCase : List[str] = output.images[0, -3:, -3:, -1]
        # make sure the slices from the two forward passes match
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , _A , _A="cpu" , _A=torch.floataa , _A=0) -> int:
"""simple docstring"""
_UpperCAmelCase : Tuple = torch.Generator(device=_A).manual_seed(_A)
_UpperCAmelCase : Any = np.random.RandomState(_A).standard_normal((1, 4, 64, 64))
_UpperCAmelCase : Dict = torch.from_numpy(_A).to(device=_A , dtype=_A)
_UpperCAmelCase : str = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''')
pipe.to(_A)
pipe.set_progress_bar_config(disable=_A)
_UpperCAmelCase : str = self.get_inputs(_A)
_UpperCAmelCase : Union[str, Any] = pipe(**_A).images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase : int = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
assert np.abs(image_slice - expected_slice).max() < 7e-3
| 186 | 1 |
'''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowercase__ : str = str(bin(UpperCAmelCase ) )[2:] # remove the leading "0b"
lowercase__ : int = str(bin(UpperCAmelCase ) )[2:]
lowercase__ : Optional[Any] = max(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase ) , b_binary.zfill(UpperCAmelCase ) ) )
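# Example (added): the zip over zfill-padded binary strings computes OR digit by
# digit. For inputs 25 and 32: bin -> '11001' and '100000', zero-filled to
# '011001' and '100000', per-digit OR -> '111001', so the function returns
# '0b111001' (i.e. 57). Note that doctest.testmod() below only has something to
# verify if such examples are placed in the function's docstring.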
if __name__ == "__main__":
import doctest
doctest.testmod()
| 152 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE = False
@property
def _lowerCAmelCase( self ) -> Optional[int]:
return 32
@property
def _lowerCAmelCase( self ) -> Optional[Any]:
return 32
@property
def _lowerCAmelCase( self ) -> List[Any]:
return self.time_input_dim
@property
def _lowerCAmelCase( self ) -> int:
return self.time_input_dim * 4
@property
def _lowerCAmelCase( self ) -> List[str]:
return 100
@property
def _lowerCAmelCase( self ) -> List[str]:
torch.manual_seed(0 )
lowercase__ : str = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ : Optional[int] = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def _lowerCAmelCase( self ) -> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase( self ) -> Any:
torch.manual_seed(0 )
lowercase__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase( self ) -> Any:
lowercase__ : List[Any] = self.dummy_unet
lowercase__ : Optional[int] = self.dummy_movq
lowercase__ : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase__ : Union[str, Any] = DDIMScheduler(**__lowerCAmelCase )
lowercase__ : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Dict:
lowercase__ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCAmelCase )
# create init_image
lowercase__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : int = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
lowercase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('''mps''' ):
lowercase__ : Dict = torch.manual_seed(__lowerCAmelCase )
else:
lowercase__ : Dict = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase__ : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
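    # Note (added): `strength` controls how much of the init image is preserved —
    # the pipeline runs roughly strength * num_inference_steps denoising steps on a
    # noised version of the input, so 0.2 here stays close to the original image.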
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Optional[int] = '''cpu'''
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : List[str] = self.pipeline_class(**__lowerCAmelCase )
lowercase__ : Any = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : int = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase__ : List[Any] = output.images
lowercase__ : str = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase__ : List[Any] = image[0, -3:, -3:, -1]
lowercase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ : int = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
lowercase__ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ : List[Any] = init_image.resize((512, 512) )
lowercase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowercase__ : str = torch.from_numpy(np.array(__lowerCAmelCase ) ).float() / 2_5_5.0
lowercase__ : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowercase__ : Union[str, Any] = '''A robot, 4k photo'''
lowercase__ : int = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase__ : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
lowercase__ : Optional[Any] = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase__ , lowercase__ = pipe_prior(
__lowerCAmelCase , image=__lowerCAmelCase , strength=0.8_5 , generator=__lowerCAmelCase , negative_prompt='''''' , ).to_tuple()
lowercase__ : Tuple = pipeline(
image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , hint=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 152 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_UpperCamelCase : Any = logging.getLogger(__name__)
@dataclass
class _snake_case :
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=a_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
SCREAMING_SNAKE_CASE : bool = field(
default=a_ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class _snake_case :
SCREAMING_SNAKE_CASE : str = field(
default=a_ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE : str = field(
default=a_ , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a_ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE : Optional[bool] = field(
default=a_ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
SCREAMING_SNAKE_CASE : bool = field(
default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
SCREAMING_SNAKE_CASE : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE : bool = field(
default=a_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=a_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
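# Usage sketch (added; the flags shown are the real dataclass fields above plus
# standard TrainingArguments — paths and values are hypothetical):
#
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --per_device_train_batch_size 32 --output_dir /tmp/debug_xnli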
def snake_case ( ) -> str:
"""simple docstring"""
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(snake_case )
datasets.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = train_dataset.features['label'].names
if training_args.do_eval:
lowerCAmelCase = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = eval_dataset.features['label'].names
if training_args.do_predict:
lowerCAmelCase = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = predict_dataset.features['label'].names
# Labels
lowerCAmelCase = len(snake_case )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case , idalabel={str(snake_case ): label for i, label in enumerate(snake_case )} , labelaid={label: i for i, label in enumerate(snake_case )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase = False
def preprocess_function(snake_case : Optional[Any] ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=snake_case , max_length=data_args.max_seq_length , truncation=snake_case , )
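    # Note (added): XNLI examples carry paired 'premise'/'hypothesis' texts, so the
    # call above produces input_ids/attention_mask (plus token_type_ids for
    # BERT-style tokenizers) for the sentence pair, truncated to max_seq_length.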
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase = min(len(snake_case ) , data_args.max_train_samples )
lowerCAmelCase = train_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase = train_dataset.map(
snake_case , batched=snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase = min(len(snake_case ) , data_args.max_eval_samples )
lowerCAmelCase = eval_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase = eval_dataset.map(
snake_case , batched=snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase = min(len(snake_case ) , data_args.max_predict_samples )
lowerCAmelCase = predict_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
lowerCAmelCase = predict_dataset.map(
snake_case , batched=snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
lowerCAmelCase = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case : EvalPrediction ):
lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , snake_case ) else p.predictions
lowerCAmelCase = np.argmax(snake_case , axis=1 )
return metric.compute(predictions=snake_case , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase = default_data_collator
elif training_args.fpaa:
lowerCAmelCase = DataCollatorWithPadding(snake_case , pad_to_multiple_of=8 )
else:
lowerCAmelCase = None
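    # Note (added): with fp16, padding each batch to a multiple of 8 aligns sequence
    # lengths with GPU tensor-core tile sizes, which typically speeds up training.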
# Initialize our Trainer
lowerCAmelCase = Trainer(
model=snake_case , args=snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , data_collator=snake_case , )
# Training
if training_args.do_train:
lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase = last_checkpoint
lowerCAmelCase = trainer.train(resume_from_checkpoint=snake_case )
lowerCAmelCase = train_result.metrics
lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case )
)
lowerCAmelCase = min(snake_case , len(snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , snake_case )
trainer.save_metrics('train' , snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase = trainer.evaluate(eval_dataset=snake_case )
lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case )
lowerCAmelCase = min(snake_case , len(snake_case ) )
trainer.log_metrics('eval' , snake_case )
trainer.save_metrics('eval' , snake_case )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = trainer.predict(snake_case , metric_key_prefix='predict' )
lowerCAmelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(snake_case )
)
lowerCAmelCase = min(snake_case , len(snake_case ) )
trainer.log_metrics('predict' , snake_case )
trainer.save_metrics('predict' , snake_case )
lowerCAmelCase = np.argmax(snake_case , axis=1 )
lowerCAmelCase = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(snake_case , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(snake_case ):
lowerCAmelCase = label_list[item]
writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
| 514 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 * 8 , _SCREAMING_SNAKE_CASE=32 * 8 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=64 , ):
'''simple docstring'''
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = is_training
lowerCAmelCase = use_auxiliary_loss
lowerCAmelCase = num_queries
lowerCAmelCase = num_channels
lowerCAmelCase = min_size
lowerCAmelCase = max_size
lowerCAmelCase = num_labels
lowerCAmelCase = hidden_dim
lowerCAmelCase = hidden_dim
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE ) > 0.5
).float()
lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_SCREAMING_SNAKE_CASE ) > 0.5).long()
lowerCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase = self.num_queries
lowerCAmelCase = self.num_labels
lowerCAmelCase = [1, 1, 1, 1]
lowerCAmelCase = self.num_channels
lowerCAmelCase = 64
lowerCAmelCase = 1_28
lowerCAmelCase = self.hidden_dim
lowerCAmelCase = self.hidden_dim
lowerCAmelCase = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = output.encoder_hidden_states
lowerCAmelCase = output.pixel_decoder_hidden_states
lowerCAmelCase = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_SCREAMING_SNAKE_CASE ) , config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
with torch.no_grad():
lowerCAmelCase = MaskaFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerForUniversalSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(_SCREAMING_SNAKE_CASE ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(
pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
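        # Note (added): when mask_labels/class_labels are provided, the head also
        # returns a scalar loss from bipartite matching between queries and targets,
        # which is why the shape check above expects torch.Size([1]).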
@require_torch
class _snake_case ( a_ , a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Tuple = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : int = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase = MaskaFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = (self.model_tester.min_size,) * 2
lowerCAmelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=_SCREAMING_SNAKE_CASE ),
'mask_labels': torch.randn((2, 10, *size) , device=_SCREAMING_SNAKE_CASE ),
'class_labels': torch.zeros(2 , 10 , device=_SCREAMING_SNAKE_CASE ).long(),
}
lowerCAmelCase = self.model_tester.get_config()
lowerCAmelCase = MaskaFormerForUniversalSegmentation(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCAmelCase = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
model.train()
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCamelCase : Union[str, Any] = 1e-4
def snake_case ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class _snake_case ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 3_84, 3_84) )
with torch.no_grad():
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 3_84, 3_84) )
with torch.no_grad():
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
lowerCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
lowerCAmelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='pt' , )
lowerCAmelCase = inputs['pixel_values'].to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['mask_labels']]
lowerCAmelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['class_labels']]
with torch.no_grad():
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
| 514 | 1 |
'''simple docstring'''
from math import isqrt, loga
def _lowerCAmelCase ( lowercase ) -> list[int]:
__lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , lowercase , i ):
__lowerCAmelCase = False
return [i for i in range(2 , lowercase ) if is_prime[i]]
def _lowerCAmelCase ( lowercase = 80_0800 , lowercase = 80_0800 ) -> int:
__lowerCAmelCase = degree * loga(lowercase )
__lowerCAmelCase = int(lowercase )
__lowerCAmelCase = calculate_prime_numbers(lowercase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = len(lowercase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
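# Note (added): the two-pointer scan works because p^q * q^p <= n^n is equivalent,
# after taking log2, to q*log2(p) + p*log2(q) <= n*log2(n) (= upper_bound), and for
# a fixed left prime p this quantity grows with q. Once (p, prime_numbers[right])
# exceeds the bound, larger right indices do too, so every prime greater than p up
# to prime_numbers[right] pairs validly with p — hence the `right - left` count.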
if __name__ == "__main__":
print(f'{solution() = }')
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase : List[Any] = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Any = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
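    # Note (added): _LazyModule defers importing the torch-backed modeling files
    # until an attribute is first accessed, keeping `import transformers` cheap.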
    __UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Tuple = mock.Mock()
snake_case__ :List[str] = 500
snake_case__ :Any = {}
snake_case__ :Union[str, Any] = HTTPError
snake_case__ :Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
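        # Note (added): the second from_pretrained succeeds despite the mocked 500
        # because the first call populated the local cache; assert_called confirms
        # the (failing) network path was actually exercised.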
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Union[str, Any] = mock.Mock()
snake_case__ :int = 500
snake_case__ :Any = {}
snake_case__ :Dict = HTTPError
snake_case__ :List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ :Union[str, Any] = tempfile.mktemp()
with open(UpperCamelCase ,"wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
finally:
os.remove(UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" ,"wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
_A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowerCAmelCase_ ( cls ) -> Optional[int]:
snake_case__ :List[str] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :str = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the tokenizer class comes from a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the fast tokenizer class comes from a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")

        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the slow tokenizer class comes from a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
    def test_trie_split(self):
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
    def test_trie_single(self):
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
    def test_trie_final(self):
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
    def test_trie_subtokens(self):
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
    def test_trie_suffix_tokens(self):
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
    def test_trie_skip(self):
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 57 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")

    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
__lowercase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the ๐ค hub.""",
)
__lowercase : str =parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
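
    # Example invocation (illustrative paths, not part of the original script):
    #   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base-finetuned-docvqa \
    #       --pytorch_dump_folder_path ./donut-docvqa-converted --push_to_hub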
| 54 |
def bfs(graph, source, sink, parent):
    # Return True if the sink is reachable from the source in the residual graph.
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0

    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities of the edges and reverse edges along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
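
# Quick sanity check: for the classic 6-node example network below (the CLRS
# textbook graph), the maximum flow from source 0 to sink 5 is 23.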
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 162 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
if self.framework == "pytorch":
subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True)
assert hasattr(self , 'env' )
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
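
    # These tests are gated by the TEST_SAGEMAKER environment variable (see the
    # skipif marker above); a typical launch, assuming the usual repo layout, is:
    #   TEST_SAGEMAKER=True pytest -s tests/sagemaker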
| 719 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
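
# Note on the defaults built above: masks of ones keep every attention head
# active and every non-pad token visible, and the decoder mask always exposes
# position 0 so the first decoder step can attend to the start token even when
# it equals the pad id.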
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
@slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 246 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 401 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
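
# Example: prime_sieve(20) returns [2, 3, 5, 7, 11, 13, 17, 19].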
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
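
# The routine above follows Project Euler problem 234 ("semidivisible numbers"):
# for consecutive primes p < q and p**2 <= n < q**2, lps(n) = p and ups(n) = q,
# and n is counted exactly when it is divisible by one, but not both, of the two.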
if __name__ == "__main__":
print(solution())
| 401 | 1 |
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
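
# Example: circle_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5].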
if __name__ == "__main__":
a_ = input("""Enter numbers separated by a comma:\n""").strip()
a_ = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 622 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 622 | 1 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal timestep embeddings (same scheme as Tensor2Tensor)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
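
# Minimal usage sketch (assumes jax and flax are installed; class names as
# restored above):
#   import jax
#   emb = FlaxTimesteps(dim=32)
#   params = emb.init(jax.random.PRNGKey(0), jnp.arange(4))
#   out = emb.apply(params, jnp.arange(4))  # shape (4, 32)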
| 18 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
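
# Example: ucal(1.5, 3) = 1.5 * (1.5 - 1) * (1.5 - 2) = -0.375.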
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
| 356 | 0 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
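
# Example with illustrative values: water at roughly 20 °C has bulk modulus
# K of about 2.2e9 Pa and density of about 998 kg/m^3, giving
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.2e9) of about 1484.6 m/s.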
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
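
# Minimal usage sketch (mirrors other transformers OnnxConfig subclasses):
#   config = Data2VecTextConfig()
#   onnx_config = Data2VecTextOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes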
| 206 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
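
# The shortcut above is a strided 1x1 convolution plus batch norm: it reshapes
# the identity branch (channels and/or spatial size) so it can be summed with
# the residual branch when the two would otherwise disagree.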
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer. The first `1x1` convolution reduces the input by a factor of `reduction`
    and the last `1x1` convolution expands it back to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
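
# Design note: the bottleneck squeezes channels by `reduction` (default 4) with
# a 1x1 convolution, runs the 3x3 convolution in the reduced space, then expands
# back with a final 1x1 convolution; this keeps deeper ResNets affordable.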
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 136 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for the HANS dataset.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data. Property names mirror the corresponding model inputs.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """TensorFlow dataset built from the HANS features via a generator."""

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts the HANS examples into padded, truncated feature dicts."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
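# Minimal usage sketch for the classes above; the data directory is a
# placeholder and the tokenizer checkpoint is illustrative.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset(
        data_dir="/path/to/hans",  # must contain heuristics_train_set.txt
        tokenizer=tokenizer,
        task="hans",
        max_seq_length=128,
    )
    print(len(dataset), dataset.get_labels())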
| 556 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
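# With the lazy structure above, a statement such as
#     from transformers.models.blenderbot import BlenderbotConfig
# resolves through the `_LazyModule` and only imports the heavy submodule on
# first attribute access (sketch of the intended behaviour, assuming the
# package is installed with its torch extra).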
| 48 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
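# Sketch of the defaults encoded above; `DebertaV2Config` is the public name
# of this class in transformers, and the asserted values mirror the __init__
# signature.
if __name__ == "__main__":
    config = DebertaV2Config()
    assert config.hidden_size == 1536
    assert config.num_attention_heads == 24
    assert config.pooler_hidden_size == 1536  # falls back to hidden_size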
| 48 | 1 |
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Returns the greatest product of thirteen adjacent digits in the string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
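# Hypothetical generalization with a configurable window size, shown only to
# illustrate the sliding-product idea; it is not part of the solution above.
def max_window_product(digits: str, window: int) -> int:
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for digit in digits[i : i + window]:
            product *= int(digit)
        best = max(best, product)
    return best


assert max_window_product("9989", 4) == 9 * 9 * 8 * 9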
| 247 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
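# Standalone sketch of the behaviour the tests below exercise: with
# do_normalize=True each sequence is normalized to zero mean and unit
# variance (constructor arguments mirror prepare_feat_extract_dict above).
def _demo_normalization():
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    speech = np.asarray(floats_list((1, 1600))[0], dtype=np.float32)
    input_values = feature_extractor(speech, sampling_rate=16000, return_tensors="np").input_values
    assert abs(input_values.mean()) < 1e-3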
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 334 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
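# Sketch of the defaults above; `DPRConfig` is the public name of this class
# in transformers.
if __name__ == "__main__":
    config = DPRConfig()
    assert config.hidden_size == 768
    assert config.projection_dim == 0  # 0 means no projection layer is added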
| 720 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper that runs several `ControlNetModel`s and sums their residuals (Multi-ControlNet).
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
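# Illustrative composition sketch; the checkpoint ids are assumptions and any
# pair of pretrained ControlNet weights would do.
if __name__ == "__main__":
    controlnets = [
        ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
        ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
    ]
    multi_controlnet = MultiControlNetModel(controlnets)
    print(len(multi_controlnet.nets))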
| 18 | 0 |
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    # Note: multiples of 15 are already multiples of 3, so no separate
    # branch is needed for them.
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
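# Equivalent closed form via inclusion-exclusion, added as an illustrative
# cross-check of the loop above (233168 is the well-known result for n=1000).
def solution_closed_form(n: int = 1000) -> int:
    def multiples_sum(k: int) -> int:
        # sum of the positive multiples of k strictly below n
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)


assert solution_closed_form(1000) == 233168 == solution(1000)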
| 569 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _UpperCAmelCase :
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
A_ : Optional[Any] = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A_ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A_ : str = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=a__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A_ : Tuple = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
A_ : Tuple = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A_ : int = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A_ : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=a__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A_ : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
A_ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowerCamelCase ( self ):
A_ : str = self.get_dummy_components()
A_ : List[Any] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Optional[Any] = self.get_dummy_inputs(a__ )
A_ : List[str] = inputs["""prompt"""]
A_ : str = inputs["""generator"""]
A_ : Tuple = inputs["""num_inference_steps"""]
A_ : Optional[int] = inputs["""output_type"""]
if "image" in inputs:
A_ : List[str] = inputs["""image"""]
else:
A_ : Optional[int] = None
if "mask_image" in inputs:
A_ : int = inputs["""mask_image"""]
else:
A_ : str = None
if "original_image" in inputs:
A_ : List[Any] = inputs["""original_image"""]
else:
A_ : int = None
A_ , A_ : Optional[int] = pipe.encode_prompt(a__ )
# inputs with prompt converted to embeddings
A_ : Optional[int] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A_ : str = image
if mask_image is not None:
A_ : Dict = mask_image
if original_image is not None:
A_ : Optional[int] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(a__ , a__ , a__ )
A_ : List[Any] = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
A_ : Union[str, Any] = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a__ , a__ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
A_ : Optional[int] = self.get_dummy_inputs(a__ )
A_ : int = inputs["""generator"""]
A_ : List[Any] = inputs["""num_inference_steps"""]
A_ : Dict = inputs["""output_type"""]
# inputs with prompt converted to embeddings
A_ : Dict = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A_ : Any = image
if mask_image is not None:
A_ : Optional[int] = mask_image
if original_image is not None:
A_ : int = original_image
A_ : Optional[Any] = pipe_loaded(**a__ )[0]
A_ : Optional[int] = np.abs(to_np(a__ ) - to_np(a__ ) ).max()
self.assertLess(a__ , 1E-4 )
def _lowerCamelCase ( self ):
A_ : Dict = self.get_dummy_components()
A_ : Optional[Any] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Any = self.get_dummy_inputs(a__ )
A_ : List[Any] = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
A_ : List[Any] = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A_ : Optional[Any] = self.get_dummy_inputs(a__ )
A_ : Optional[Any] = pipe_loaded(**a__ )[0]
A_ : Union[str, Any] = np.abs(to_np(a__ ) - to_np(a__ ) ).max()
self.assertLess(a__ , 1E-4 )
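# Standalone sketch of two of the components wired together in the dummy
# pipelines above; the scheduler arguments are taken verbatim from the test
# setup.
if __name__ == "__main__":
    demo_scheduler = DDPMScheduler(
        num_train_timesteps=1000,
        beta_schedule="squaredcos_cap_v2",
        beta_start=0.0001,
        beta_end=0.02,
    )
    demo_watermarker = IFWatermarker()
    print(type(demo_scheduler).__name__, type(demo_watermarker).__name__)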
| 569 | 1 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Returns u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 704 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : int = tmp_path / """cache"""
_UpperCAmelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : Tuple = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = tmp_path / """cache"""
_UpperCAmelCase : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : int = features.copy() if features else default_expected_features
_UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Any = JsonDatasetReader(lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
_UpperCAmelCase : int = features.copy() if features else default_expected_features
_UpperCAmelCase : Optional[Any] = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Dict = JsonDatasetReader(lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_UpperCAmelCase : Union[str, Any] = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
_UpperCAmelCase : Optional[Any] = features.copy()
_UpperCAmelCase : Any = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Tuple = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = JsonDatasetReader(lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Dict = tmp_path / """cache"""
_UpperCAmelCase : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : List[Any] = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , split=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
if issubclass(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = jsonl_path
elif issubclass(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Union[str, Any] = [jsonl_path]
_UpperCAmelCase : int = tmp_path / """cache"""
_UpperCAmelCase : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : Any = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : List[Any] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ ).read()
_check_json_datasetdict(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = tmp_path / """cache"""
_UpperCAmelCase : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : List[str] = features.copy() if features else default_expected_features
_UpperCAmelCase : int = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Any = JsonDatasetReader({"""train""": jsonl_path} , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_datasetdict(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
if split:
_UpperCAmelCase : str = {split: jsonl_path}
else:
_UpperCAmelCase : int = """train"""
_UpperCAmelCase : int = {"""train""": jsonl_path, """test""": jsonl_path}
_UpperCAmelCase : Optional[int] = tmp_path / """cache"""
_UpperCAmelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : Optional[Any] = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_datasetdict(lowerCAmelCase_ , lowerCAmelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class __lowerCAmelCase :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ ).write()
buffer.seek(0 )
_UpperCAmelCase : Optional[int] = load_json_function(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert isinstance(exported_content[0] , lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ , orient=lowerCAmelCase__ ).write()
buffer.seek(0 )
_UpperCAmelCase : str = load_json(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCAmelCase__ ) == 1_0
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase : Optional[int] = load_json_function(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert isinstance(exported_content[0] , lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , lines=lowerCAmelCase__ , orient=lowerCAmelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase : Optional[Any] = load_json(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCAmelCase__ ) == 1_0
def snake_case_ (self , lowerCAmelCase__ ):
with pytest.raises(lowerCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / F"test.json.{extension}"
_UpperCAmelCase : List[Any] = str(shared_datadir / F"test_file.json.{extension}" )
JsonDatasetWriter(lowerCAmelCase__ , lowerCAmelCase__ , compression=lowerCAmelCase__ ).write()
with fsspec.open(lowerCAmelCase__ , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase : str = f.read()
with fsspec.open(lowerCAmelCase__ , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase : Optional[int] = f.read()
assert exported_content == original_content
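# Standalone sketch mirroring the writer tests above: serialize a tiny
# Dataset to JSON Lines in memory and read it back.
if __name__ == "__main__":
    demo_dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    demo_buffer = io.BytesIO()
    JsonDatasetWriter(demo_dataset, demo_buffer, lines=True).write()
    demo_buffer.seek(0)
    print(load_json_lines(demo_buffer))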
| 156 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    r"""
    Wraps a ViT image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
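# Usage sketch; the checkpoint id is an assumption (the commonly published
# CLIPSeg weights on the Hub).
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.new("RGB", (352, 352))
    inputs = processor(text=["a cat"], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))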
| 592 |
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Counts the hybrid-integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 547 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 239 |
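The class above is an identifier-obfuscated copy of transformers' BloomTokenizerFast (the placeholder base class stands for PreTrainedTokenizerFast). A minimal usage sketch; the checkpoint name comes from the map above, and the printed ids are illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
encoded = tokenizer("Hello, world!")
print(encoded["input_ids"])                    # token ids, e.g. [59414, 15, 8876, 4]
print(tokenizer.decode(encoded["input_ids"]))  # round-trips to "Hello, world!"

The `add_prefix_space` guard in `_batch_encode_plus`/`_encode_plus` above is why pre-tokenized input (`is_split_into_words=True`) requires instantiating the tokenizer with `add_prefix_space=True`.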
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=1_00 , lowerCAmelCase__=13 , lowerCAmelCase__=30 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , ):
'''simple docstring'''
_UpperCamelCase : Dict = parent
_UpperCamelCase : str = vocab_size
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[Any] = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Tuple = num_channels
_UpperCamelCase : Optional[int] = is_training
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Any = type_sequence_label_size
_UpperCamelCase : Union[str, Any] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : List[Any] = num_patches + 1
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Tuple = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Optional[int] = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : List[Any] = FlaxBeitModel(config=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Tuple = FlaxBeitForMaskedImageModeling(config=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.type_sequence_label_size
_UpperCamelCase : int = FlaxBeitForImageClassification(config=lowerCAmelCase__ )
_UpperCamelCase : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Dict = 1
_UpperCamelCase : Tuple = FlaxBeitForImageClassification(lowerCAmelCase__ )
_UpperCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : str = model(lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = config_and_inputs
_UpperCamelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = FlaxBeitModelTester(self )
_UpperCamelCase : Tuple = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def lowercase_ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(lowerCAmelCase__ )
_UpperCamelCase : List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Dict = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase : Tuple = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : Dict = model_class(lowerCAmelCase__ )
@jax.jit
def model_jitted(lowerCAmelCase__ , **lowerCAmelCase__ ):
return model(pixel_values=lowerCAmelCase__ , **lowerCAmelCase__ )
with self.subTest("JIT Enabled" ):
_UpperCamelCase : Optional[Any] = model_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCamelCase : int = model_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def lowercase_ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase : Optional[int] = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
_UpperCamelCase : Optional[Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(lowerCAmelCase__ )
def __lowerCAmelCase ( ) -> Dict:
_UpperCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ (self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Tuple = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : Union[str, Any] = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="np" ).pixel_values
# prepare bool_masked_pos
_UpperCamelCase : List[Any] = np.ones((1, 1_96) , dtype=lowerCAmelCase__ )
# forward pass
_UpperCamelCase : List[Any] = model(pixel_values=lowerCAmelCase__ , bool_masked_pos=lowerCAmelCase__ )
_UpperCamelCase : Dict = outputs.logits
# verify the logits
_UpperCamelCase : Tuple = (1, 1_96, 81_92)
self.assertEqual(logits.shape , lowerCAmelCase__ )
_UpperCamelCase : List[str] = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , lowerCAmelCase__ , atol=1E-2 ) )
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=lowerCAmelCase__ , return_tensors="np" )
# forward pass
_UpperCamelCase : Optional[int] = model(**lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = outputs.logits
# verify the logits
_UpperCamelCase : List[str] = (1, 10_00)
self.assertEqual(logits.shape , lowerCAmelCase__ )
_UpperCamelCase : List[str] = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
_UpperCamelCase : Optional[Any] = 2_81
self.assertEqual(logits.argmax(-1 ).item() , lowerCAmelCase__ )
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Any = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
_UpperCamelCase : Any = self.default_image_processor
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="np" )
# forward pass
_UpperCamelCase : Optional[int] = model(**lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = outputs.logits
# verify the logits
_UpperCamelCase : Union[str, Any] = (1, 2_18_41)
self.assertEqual(logits.shape , lowerCAmelCase__ )
_UpperCamelCase : List[Any] = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
_UpperCamelCase : List[str] = 23_96
self.assertEqual(logits.argmax(-1 ).item() , lowerCAmelCase__ )
| 239 | 1 |
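The suite above tests the Flax BEiT models. A condensed sketch of the slow image-classification path it asserts; the checkpoint and fixture path are taken from the tests, and class id 281 is the value the integration test expects:

import numpy as np
from PIL import Image
from transformers import BeitImageProcessor, FlaxBeitForImageClassification

model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="np")
logits = model(**inputs).logits            # shape (1, 1000)
print(int(np.argmax(logits, axis=-1)[0]))  # 281 per the integration test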
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : List[Any] = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Any = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__magic_name__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
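The module above wires up transformers' lazy-import machinery so the torch backend is imported only when an exported name is first touched. A simplified, self-contained stand-in for `_LazyModule` (not its actual implementation) that shows the pattern:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value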
'''simple docstring'''
def __A ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return int((input_a, input_a).count(0 ) != 0 )
def __A ( ):
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 211 | 0 |
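A de-obfuscated sketch of the NAND gate above, checked against its full truth table (output is 0 only when both inputs are 1):

def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)

assert [nand_gate(a, b) for a in (0, 1) for b in (0, 1)] == [1, 1, 1, 0]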
from itertools import count
def __lowerCAmelCase ( _UpperCamelCase = 50 ) -> int:
'''simple docstring'''
lowerCamelCase__: List[Any] = [1] * min_block_length
for n in count(_UpperCamelCase ):
fill_count_functions.append(1 )
for block_length in range(_UpperCamelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 242 |
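The snippet above is an obfuscated Project Euler 115 solution: find the least n for which the block-fill count F(min_block_length, n) first exceeds one million. A runnable sketch, assuming the placeholder parameter is `min_block_length`:

from itertools import count

def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # ways to fill the remainder after placing this block
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n

print(f"{solution() = }")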
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_lowercase = re.compile(r'\b(a|an|the)\b', re.UNICODE)
_lowercase = None
def __lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=_UpperCamelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=_UpperCamelCase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __lowerCAmelCase ( _UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCamelCase__: Any = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def __lowerCAmelCase ( _UpperCamelCase ) -> List[str]:
'''simple docstring'''
def remove_articles(_UpperCamelCase ):
return ARTICLES_REGEX.sub(""" """ , _UpperCamelCase )
def white_space_fix(_UpperCamelCase ):
return " ".join(text.split() )
def remove_punc(_UpperCamelCase ):
lowerCamelCase__: Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCamelCase ) ) ) )
def __lowerCAmelCase ( _UpperCamelCase ) -> int:
'''simple docstring'''
if not s:
return []
return normalize_answer(_UpperCamelCase ).split()
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(_UpperCamelCase ) == normalize_answer(_UpperCamelCase ) )
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowerCamelCase__: Any = get_tokens(_UpperCamelCase )
lowerCamelCase__: Union[str, Any] = get_tokens(_UpperCamelCase )
lowerCamelCase__: List[str] = collections.Counter(_UpperCamelCase ) & collections.Counter(_UpperCamelCase )
lowerCamelCase__: Optional[Any] = sum(common.values() )
if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
lowerCamelCase__: List[str] = 1.0 * num_same / len(_UpperCamelCase )
lowerCamelCase__: Optional[Any] = 1.0 * num_same / len(_UpperCamelCase )
lowerCamelCase__: Dict = (2 * precision * recall) / (precision + recall)
return fa
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> Dict:
'''simple docstring'''
lowerCamelCase__: Any = {}
lowerCamelCase__: str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCamelCase__: Dict = qa["""id"""]
lowerCamelCase__: Union[str, Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_UpperCamelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCamelCase__: int = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
lowerCamelCase__: Optional[Any] = preds[qid]
# Take max over all gold answers
lowerCamelCase__: str = max(compute_exact(_UpperCamelCase , _UpperCamelCase ) for a in gold_answers )
lowerCamelCase__: str = max(compute_fa(_UpperCamelCase , _UpperCamelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
'''simple docstring'''
lowerCamelCase__: List[str] = {}
for qid, s in scores.items():
lowerCamelCase__: Dict = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCamelCase__: Optional[int] = float(not qid_to_has_ans[qid] )
else:
lowerCamelCase__: str = s
return new_scores
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Optional[int]:
'''simple docstring'''
if not qid_list:
lowerCamelCase__: List[str] = len(_UpperCamelCase )
return collections.OrderedDict(
[
("""exact""", 1_00.0 * sum(exact_scores.values() ) / total),
("""f1""", 1_00.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
lowerCamelCase__: int = len(_UpperCamelCase )
return collections.OrderedDict(
[
("""exact""", 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
'''simple docstring'''
for k in new_eval:
lowerCamelCase__: int = new_eval[k]
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
'''simple docstring'''
plt.step(_UpperCamelCase , _UpperCamelCase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(_UpperCamelCase , _UpperCamelCase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_UpperCamelCase )
plt.savefig(_UpperCamelCase )
plt.clf()
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> str:
'''simple docstring'''
lowerCamelCase__: Tuple = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : na_probs[k] )
lowerCamelCase__: str = 0.0
lowerCamelCase__: Optional[int] = 1.0
lowerCamelCase__: List[Any] = 0.0
lowerCamelCase__: Any = [1.0]
lowerCamelCase__: Any = [0.0]
lowerCamelCase__: List[str] = 0.0
for i, qid in enumerate(_UpperCamelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCamelCase__: List[str] = true_pos / float(i + 1 )
lowerCamelCase__: int = true_pos / float(_UpperCamelCase )
if i == len(_UpperCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_UpperCamelCase )
recalls.append(_UpperCamelCase )
if out_image:
plot_pr_curve(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return {"ap": 1_00.0 * avg_prec}
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
'''simple docstring'''
if out_image_dir and not os.path.exists(_UpperCamelCase ):
os.makedirs(_UpperCamelCase )
lowerCamelCase__: List[str] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
lowerCamelCase__: int = make_precision_recall_eval(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , out_image=os.path.join(_UpperCamelCase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
lowerCamelCase__: Union[str, Any] = make_precision_recall_eval(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , out_image=os.path.join(_UpperCamelCase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
lowerCamelCase__: int = {k: float(_UpperCamelCase ) for k, v in qid_to_has_ans.items()}
lowerCamelCase__: List[str] = make_precision_recall_eval(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , out_image=os.path.join(_UpperCamelCase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(_UpperCamelCase , _UpperCamelCase , """pr_exact""" )
merge_eval(_UpperCamelCase , _UpperCamelCase , """pr_f1""" )
merge_eval(_UpperCamelCase , _UpperCamelCase , """pr_oracle""" )
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
'''simple docstring'''
if not qid_list:
return
lowerCamelCase__: Dict = [na_probs[k] for k in qid_list]
lowerCamelCase__: int = np.ones_like(_UpperCamelCase ) / float(len(_UpperCamelCase ) )
plt.hist(_UpperCamelCase , weights=_UpperCamelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(_UpperCamelCase , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
'''simple docstring'''
lowerCamelCase__: List[str] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
lowerCamelCase__: List[str] = num_no_ans
lowerCamelCase__: List[Any] = cur_score
lowerCamelCase__: Tuple = 0.0
lowerCamelCase__: Any = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : na_probs[k] )
for i, qid in enumerate(_UpperCamelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCamelCase__: int = scores[qid]
else:
if preds[qid]:
lowerCamelCase__: List[Any] = -1
else:
lowerCamelCase__: Any = 0
cur_score += diff
if cur_score > best_score:
lowerCamelCase__: List[Any] = cur_score
lowerCamelCase__: Union[str, Any] = na_probs[qid]
return 1_00.0 * best_score / len(_UpperCamelCase ), best_thresh
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] = find_best_thresh(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCamelCase__ , lowerCamelCase__: int = find_best_thresh(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCamelCase__: int = best_exact
lowerCamelCase__: int = exact_thresh
lowerCamelCase__: Optional[Any] = best_fa
lowerCamelCase__: Union[str, Any] = fa_thresh
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
with open(OPTS.data_file ) as f:
lowerCamelCase__: Any = json.load(_UpperCamelCase )
lowerCamelCase__: List[Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
lowerCamelCase__: str = json.load(_UpperCamelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
lowerCamelCase__: Any = json.load(_UpperCamelCase )
else:
lowerCamelCase__: Dict = {k: 0.0 for k in preds}
lowerCamelCase__: Dict = make_qid_to_has_ans(_UpperCamelCase ) # maps qid to True/False
lowerCamelCase__: Any = [k for k, v in qid_to_has_ans.items() if v]
lowerCamelCase__: Dict = [k for k, v in qid_to_has_ans.items() if not v]
lowerCamelCase__ , lowerCamelCase__: Dict = get_raw_scores(_UpperCamelCase , _UpperCamelCase )
lowerCamelCase__: Union[str, Any] = apply_no_ans_threshold(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , OPTS.na_prob_thresh )
lowerCamelCase__: Tuple = apply_no_ans_threshold(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , OPTS.na_prob_thresh )
lowerCamelCase__: Tuple = make_eval_dict(_UpperCamelCase , _UpperCamelCase )
if has_ans_qids:
lowerCamelCase__: Optional[Any] = make_eval_dict(_UpperCamelCase , _UpperCamelCase , qid_list=_UpperCamelCase )
merge_eval(_UpperCamelCase , _UpperCamelCase , """HasAns""" )
if no_ans_qids:
lowerCamelCase__: Optional[Any] = make_eval_dict(_UpperCamelCase , _UpperCamelCase , qid_list=_UpperCamelCase )
merge_eval(_UpperCamelCase , _UpperCamelCase , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , OPTS.out_image_dir )
histogram_na_prob(_UpperCamelCase , _UpperCamelCase , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(_UpperCamelCase , _UpperCamelCase , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
else:
print(json.dumps(_UpperCamelCase , indent=2 ) )
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 242 | 1 |
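The script above is the official SQuAD 2.0 evaluator with obfuscated locals. A self-contained sketch of its core scoring path: answer normalization (lowercase, strip punctuation, drop articles, squeeze whitespace, in the script's order) followed by token-overlap F1:

import collections
import re
import string

ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

def normalize_answer(s: str) -> str:
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = ARTICLES_REGEX.sub(" ", s)
    return " ".join(s.split())

def compute_f1(a_gold: str, a_pred: str) -> float:
    gold_toks = normalize_answer(a_gold).split()
    pred_toks = normalize_answer(a_pred).split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:
        # If either side is a no-answer, F1 is 1 only when both are.
        return float(gold_toks == pred_toks)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(compute_f1("The cat sat", "a cat sat down"))  # 0.8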
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : List[Any] = []
lowerCamelCase_ : Optional[Any] = []
lowerCamelCase_ : Tuple = []
for rt in rc.restypes:
lowerCamelCase_ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowerCamelCase_ : str = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowerCamelCase_ : Dict = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowerCamelCase_ : List[str] = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowerCamelCase_ : Optional[Any] = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
lowerCamelCase_ : Optional[int] = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowerCamelCase_ : Dict = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase_ : List[str] = restype_atomaa_mask[protein_aatype]
lowerCamelCase_ : Any = residx_atomaa_mask
lowerCamelCase_ : str = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowerCamelCase_ : Union[str, Any] = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase_ : List[Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowerCamelCase_ : List[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowerCamelCase_ : Tuple = rc.restype_atoa[restype_letter]
lowerCamelCase_ : Tuple = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowerCamelCase_ : Any = rc.atom_order[atom_name]
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Optional[Any] = restype_atomaa_mask[protein_aatype]
lowerCamelCase_ : Optional[Any] = residx_atomaa_mask
return protein
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Optional[int] = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
lowerCamelCase_ : Optional[Any] = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
| 501 |
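The functions above build per-residue index maps between the compact atom14 layout and the padded atom37 layout used by OpenFold. A sketch of how such maps are typically consumed, gathering atom14 coordinates into atom37 order and masking non-existent atoms; this is a simplified stand-in, not the library's exact API:

import torch

def atom14_to_atom37(atom14_positions: torch.Tensor,
                     residx_atom37_to_atom14: torch.Tensor,
                     atom37_atom_exists: torch.Tensor) -> torch.Tensor:
    # atom14_positions: [num_res, 14, 3]; index map: [num_res, 37]; mask: [num_res, 37]
    gathered = torch.gather(
        atom14_positions,
        1,
        residx_atom37_to_atom14[..., None].expand(-1, -1, 3),
    )
    return gathered * atom37_atom_exists[..., None]

num_res = 5
pos14 = torch.randn(num_res, 14, 3)
idx_map = torch.randint(0, 14, (num_res, 37))   # stands in for the real per-residue map
mask = torch.ones(num_res, 37)
print(atom14_to_atom37(pos14, idx_map, mask).shape)  # torch.Size([5, 37, 3])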
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __init__( self : int , UpperCamelCase_ : CLIPSegForImageSegmentation , UpperCamelCase_ : CLIPSegProcessor , UpperCamelCase_ : AutoencoderKL , UpperCamelCase_ : CLIPTextModel , UpperCamelCase_ : CLIPTokenizer , UpperCamelCase_ : UNetaDConditionModel , UpperCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase_ : StableDiffusionSafetyChecker , UpperCamelCase_ : CLIPImageProcessor , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ : int = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = dict(scheduler.config )
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : List[Any] = FrozenDict(UpperCamelCase_ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ : Any = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
lowerCamelCase_ : Dict = dict(scheduler.config )
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Any = FrozenDict(UpperCamelCase_ )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=UpperCamelCase_ , segmentation_processor=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , )
def __UpperCamelCase ( self : str , UpperCamelCase_ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.enable_attention_slicing(UpperCamelCase_ )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowerCamelCase_ : List[str] = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCamelCase_ : str , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 50 , UpperCamelCase_ : float = 7.5 , UpperCamelCase_ : Optional[Union[str, List[str]]] = None , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_ : int = 1 , **UpperCamelCase_ : Dict , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
lowerCamelCase_ : Union[str, Any] = self.segmentation_model(**UpperCamelCase_ )
lowerCamelCase_ : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ : int = self.numpy_to_pil(UpperCamelCase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ : List[str] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , height=UpperCamelCase_ , width=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ , eta=UpperCamelCase_ , generator=UpperCamelCase_ , latents=UpperCamelCase_ , output_type=UpperCamelCase_ , return_dict=UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=UpperCamelCase_ , )
| 501 | 1 |
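The class above is the diffusers community `text_inpainting` pipeline: CLIPSeg segments the region described by `text`, and a Stable Diffusion inpainting pass fills the resulting mask according to `prompt`. A usage sketch along the lines of the community example; the checkpoint names and the image path are illustrative:

import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
from diffusers import DiffusionPipeline

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=segmentation_model,
    segmentation_processor=processor,
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("input.png").resize((512, 512))
result = pipe(image=image, text="a glass", prompt="a cup").images[0]
result.save("output.png")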
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " ,lowerCAmelCase ,)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =RobertaConfig
UpperCAmelCase ="roberta"
def __init__( self , snake_case) -> List[Any]:
'''simple docstring'''
super().__init__(snake_case)
_UpperCAmelCase : Optional[Any] =RobertaEmbeddings(snake_case)
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " ,lowerCAmelCase ,)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =RobertaConfig
UpperCAmelCase ="roberta"
def __init__( self , snake_case) -> Optional[int]:
'''simple docstring'''
super().__init__(snake_case)
_UpperCAmelCase : Optional[int] =config.num_labels
_UpperCAmelCase : Optional[int] =config.num_hidden_layers
_UpperCAmelCase : int =DeeRobertaModel(snake_case)
_UpperCAmelCase : Optional[Any] =nn.Dropout(config.hidden_dropout_prob)
_UpperCAmelCase : Any =nn.Linear(config.hidden_size , self.config.num_labels)
@add_start_docstrings_to_model_forward(snake_case)
def lowerCAmelCase ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=-1 , snake_case=False , ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict =self.num_layers
try:
_UpperCAmelCase : str =self.roberta(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , )
_UpperCAmelCase : List[Any] =outputs[1]
_UpperCAmelCase : Any =self.dropout(snake_case)
_UpperCAmelCase : Any =self.classifier(snake_case)
_UpperCAmelCase : List[Any] =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_UpperCAmelCase : Tuple =e.message
_UpperCAmelCase : str =e.exit_layer
_UpperCAmelCase : Optional[Any] =outputs[0]
if not self.training:
_UpperCAmelCase : str =entropy(snake_case)
_UpperCAmelCase : List[str] =[]
_UpperCAmelCase : Union[str, Any] =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_UpperCAmelCase : str =MSELoss()
_UpperCAmelCase : Optional[Any] =loss_fct(logits.view(-1) , labels.view(-1))
else:
_UpperCAmelCase : str =CrossEntropyLoss()
_UpperCAmelCase : Dict =loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
_UpperCAmelCase : Tuple =[]
for highway_exit in outputs[-1]:
_UpperCAmelCase : Tuple =highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
_UpperCAmelCase : Tuple =MSELoss()
_UpperCAmelCase : List[str] =loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
_UpperCAmelCase : str =CrossEntropyLoss()
_UpperCAmelCase : str =loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(snake_case)
if train_highway:
_UpperCAmelCase : Optional[Any] =(sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
_UpperCAmelCase : str =(loss,) + outputs
if not self.training:
_UpperCAmelCase : int =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_UpperCAmelCase : List[Any] =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 331 |
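The model above adds DeeBERT-style "highway" exits to RoBERTa: at inference time, the entropy of each intermediate classifier decides whether to stop before the final layer. A minimal sketch of that exit criterion:

import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution, per example.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs.clamp_min(1e-12))).sum(dim=-1)

def early_exit(per_layer_logits: list[torch.Tensor], threshold: float):
    for layer_idx, logits in enumerate(per_layer_logits):
        if entropy(logits).item() < threshold:
            return logits, layer_idx + 1          # confident enough: exit here
    return per_layer_logits[-1], len(per_layer_logits)  # fall through to the last layer

per_layer_logits = [torch.randn(1, 3) for _ in range(12)]  # one classifier per layer
logits, exit_layer = early_exit(per_layer_logits, threshold=0.4)
print(exit_layer)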
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase =random.Random()
if is_torch_available():
import torch
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=1.0 , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None ):
'''simple docstring'''
if rng is None:
_UpperCAmelCase : Optional[Any] =global_rng
_UpperCAmelCase : Optional[int] =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=4_0_0 , snake_case=2_0_0_0 , snake_case=1 , snake_case=0.0 , snake_case=1_6_0_0_0 , snake_case=True , snake_case=True , ) -> int:
'''simple docstring'''
_UpperCAmelCase : int =parent
_UpperCAmelCase : Any =batch_size
_UpperCAmelCase : Tuple =min_seq_length
_UpperCAmelCase : Tuple =max_seq_length
_UpperCAmelCase : Optional[Any] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : int =feature_size
_UpperCAmelCase : List[str] =padding_value
_UpperCAmelCase : int =sampling_rate
_UpperCAmelCase : List[str] =return_attention_mask
_UpperCAmelCase : Tuple =do_normalize
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase ( self , snake_case=False , snake_case=False) -> Any:
'''simple docstring'''
def _flatten(snake_case):
return list(itertools.chain(*snake_case))
if equal_length:
_UpperCAmelCase : List[Any] =floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_UpperCAmelCase : Optional[Any] =[
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_UpperCAmelCase : Optional[int] =[np.asarray(snake_case) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =ASTFeatureExtractor
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =ASTFeatureExtractionTester(self)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase : str =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : str =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_UpperCAmelCase : Optional[int] =[np.asarray(snake_case) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : List[str] =feat_extract(speech_inputs[0] , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test batched
_UpperCAmelCase : Tuple =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Dict =[floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_UpperCAmelCase : Tuple =np.asarray(snake_case)
_UpperCAmelCase : Optional[Any] =feat_extract(snake_case , return_tensors='np').input_values
_UpperCAmelCase : Dict =feat_extract(snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
@require_torch
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
import torch
_UpperCAmelCase : Any =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase : int =np.random.rand(1_0_0).astype(np.floataa)
_UpperCAmelCase : str =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : Optional[Any] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
_UpperCAmelCase : List[str] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def lowerCAmelCase ( self , snake_case) -> int:
'''simple docstring'''
from datasets import load_dataset
_UpperCAmelCase : Optional[Any] =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
# automatic decoding with librispeech
_UpperCAmelCase : int =ds.sort('id').select(range(snake_case))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
# fmt: off
_UpperCAmelCase : List[str] =torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69])
# fmt: on
_UpperCAmelCase : Dict =self._load_datasamples(1)
_UpperCAmelCase : Optional[Any] =ASTFeatureExtractor()
_UpperCAmelCase : Optional[Any] =feature_extractor(snake_case , return_tensors='pt').input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8))
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , snake_case , atol=1E-4))
| 331 | 1 |
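The tests above cover ASTFeatureExtractor, which turns 16 kHz mono audio into fixed-size (1024, 128) log-mel features. A minimal usage sketch, assuming transformers and torch are installed:

import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.rand(16_000).astype(np.float32)  # one second of placeholder audio
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([1, 1024, 128])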
"""simple docstring"""
import qiskit
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
UpperCamelCase : List[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
UpperCamelCase : Tuple = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__magic_name__ : Any = single_qubit_measure(2, 2)
print(f'''Total counts for various states are: {counts}''')
| 102 |
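A de-obfuscated sketch of the circuit above: X gates flip both qubits, so every shot should land on the "11" state. It assumes a pre-1.0 qiskit with qiskit-aer installed, where `qiskit.Aer` and `qiskit.execute` still exist; the `single_qubit_measure` call name is a leftover from the source repo even though two qubits are measured:

import qiskit

def two_qubit_x_measure(num_qubits: int = 2, num_bits: int = 2) -> dict:
    backend = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(num_qubits, num_bits)
    circuit.x(0)
    circuit.x(1)
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, backend, shots=1000)
    return job.result().get_counts(circuit)

print(two_qubit_x_measure())  # {'11': 1000}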
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UpperCAmelCase :
def __init__( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=9_9 , __lowerCamelCase : int=1_3 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Dict=9 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[Any]=3_2 , __lowerCamelCase : int=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Tuple=3_7 , __lowerCamelCase : Any=8 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Any=0.0_02 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : str=0 , __lowerCamelCase : str=0 , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=None , ):
UpperCAmelCase__ :Tuple = parent
UpperCAmelCase__ :str = batch_size
UpperCAmelCase__ :int = encoder_seq_length
UpperCAmelCase__ :Optional[int] = decoder_seq_length
# For common tests
UpperCAmelCase__ :int = self.decoder_seq_length
UpperCAmelCase__ :List[Any] = is_training
UpperCAmelCase__ :Any = use_attention_mask
UpperCAmelCase__ :Tuple = use_labels
UpperCAmelCase__ :Optional[int] = vocab_size
UpperCAmelCase__ :Optional[Any] = hidden_size
UpperCAmelCase__ :Optional[Any] = num_hidden_layers
UpperCAmelCase__ :Tuple = num_attention_heads
UpperCAmelCase__ :str = d_ff
UpperCAmelCase__ :Tuple = relative_attention_num_buckets
UpperCAmelCase__ :int = dropout_rate
UpperCAmelCase__ :Dict = initializer_factor
UpperCAmelCase__ :int = eos_token_id
UpperCAmelCase__ :Tuple = pad_token_id
UpperCAmelCase__ :Tuple = decoder_start_token_id
UpperCAmelCase__ :List[str] = None
UpperCAmelCase__ :List[str] = decoder_layers
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return TaConfig.from_pretrained('''google/umt5-base''' )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[Any]=None , ):
if attention_mask is None:
UpperCAmelCase__ :Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCAmelCase__ :Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCAmelCase__ :List[str] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowerCamelCase )
if decoder_head_mask is None:
UpperCAmelCase__ :Optional[Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
if cross_attn_head_mask is None:
UpperCAmelCase__ :List[Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
UpperCAmelCase__ :Optional[int] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
UpperCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCAmelCase__ :int = input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase__ :Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase__ :Dict = self.get_config()
UpperCAmelCase__ :Union[str, Any] = config.num_attention_heads
UpperCAmelCase__ :Dict = self.prepare_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, input_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
UpperCAmelCase__ , UpperCAmelCase__ :int = self.prepare_config_and_inputs()
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , ):
UpperCAmelCase__ :str = UMTaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase__ :Optional[int] = model(
input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , )
UpperCAmelCase__ :List[Any] = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
UpperCAmelCase__ :Dict = result.last_hidden_state
UpperCAmelCase__ :Tuple = result.past_key_values
UpperCAmelCase__ :Dict = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self, config, input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)

            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 📐 📐 📐 📐 📐 📐 📐 📐 📐 📐 📐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
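
# Illustrative aside (not part of the test suite above): the cache-consistency
# pattern from create_and_check_decoder_model_past, reduced to a hedged,
# self-contained sketch with a stateless stand-in module instead of a decoder.
import torch

torch.manual_seed(0)
emb = torch.nn.Embedding(10, 4)         # stand-in for the decoder
full = emb(torch.tensor([[1, 2, 3]]))   # forward pass without a cache
step = emb(torch.tensor([[3]]))         # forward pass on just the new token
assert torch.allclose(full[:, -1], step[:, 0], atol=1e-3)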
| 467 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index,
        )
| 182 |
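
# Hedged sketch of the Ray actor fan-out the retriever above builds on:
# define a remote class, spawn workers, and gather results with ray.get.
# The Echo actor is purely illustrative; a local Ray runtime is assumed.
import ray

@ray.remote
class Echo:
    def ping(self, i):
        return 2 * i

ray.init(ignore_reinit_error=True)
workers = [Echo.remote() for _ in range(2)]
print(ray.get([w.ping.remote(i) for i, w in enumerate(workers)]))  # [0, 2]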
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 182 | 1 |
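
# Hedged non-recursive alternative to shortest_path above: follow the parent
# pointers produced by breadth_first_search and reverse the collected path.
def iterative_shortest_path(parent: dict, source: str, target: str) -> str:
    path = [target]
    while path[-1] != source:
        path.append(parent[path[-1]])
    return "->".join(reversed(path))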
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress_file(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress_file(sys.argv[1], sys.argv[2])
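
# Quick illustration of the byte-to-bitstring step in read_file_binary: each
# byte is rendered as a zero-padded 8-bit binary string and concatenated.
example = b"AB"
print("".join(f"{byte:08b}" for byte in example))  # 0100000101000010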
| 365 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
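
# Minimal usage sketch for the config above: defaults mirror google/fnet-base,
# and any __init__ argument can be overridden at construction time.
config = FNetConfig(num_hidden_layers=6)
print(config.model_type, config.num_hidden_layers)  # fnet 6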
| 365 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if inductance < 0:
raise ValueError("Inductance cannot be negative")
if frequency < 0:
raise ValueError("Frequency cannot be negative")
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative")
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
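
# Usage sketch: pass exactly one zero and the function solves for that
# quantity (here the reactance of a 35 mH inductor at 1 kHz).
print(ind_reactance(35e-3, 1e3, 0))  # {'reactance': 219.91...}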
| 187 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight")
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias")

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}",)
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",)
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",)
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",)
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",)
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
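
# Tiny illustration of the cutoff bookkeeping above: cutoff_ends brackets the
# vocabulary into a head shortlist plus tail clusters (values are made up).
cutoffs = [4, 8]
vocab_size = 12
cutoff_ends = [0] + cutoffs + [vocab_size]
for i in range(len(cutoff_ends) - 1):
    print(f"cluster {i}: token ids [{cutoff_ends[i]}, {cutoff_ends[i + 1]})")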
| 187 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 492 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class __lowercase (_UpperCAmelCase ):
def __init__( self , *A_ , **A_ ) ->None:
'''simple docstring'''
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 492 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
lowerCAmelCase__ = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
lowerCAmelCase__ = tokenizer(["Making tiny model"], return_tensors="pt")
lowerCAmelCase__ = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
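
# Hedged follow-up: once uploaded as noted above, the tiny checkpoint can be
# pulled like any other hub model (left commented out to avoid a network call):
# tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")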
| 718 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected Unweighted Graph for running Markov Chain Algorithm
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
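
# Usage sketch for get_transitions above: a two-state chain walked for 1000
# steps; counts are random but should favour the "sticky" states.
example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.1), ("b", "b", 0.9)]
print(get_transitions("a", example_transitions, 1000))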
| 594 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
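
# Hedged sketch of the lazy-import idea behind _LazyModule: resolve an
# attribute's submodule only on first access instead of at package import.
import importlib

class LazyNamespace:
    def __init__(self, mapping):
        self._mapping = mapping  # attribute name -> module path

    def __getattr__(self, name):
        module = importlib.import_module(self._mapping[name])
        return getattr(module, name)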
| 54 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
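
# Hedged usage sketch: the tool scores each candidate label via NLI
# entailment and returns the best one (commented out: downloads a checkpoint).
# tool = TextClassificationTool()
# print(tool("This is a super nice API!", labels=["positive", "negative"]))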
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 405 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
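
# Hedged aside on the ">>zh<<" constant above: multi-target Marian checkpoints
# expect the target-language code prepended to the source sentence, e.g.:
src_text = zh_code + " I am a small frog"  # ">>zh<< I am a small frog"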
| 48 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
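
# Usage sketch: sort a small list in place and report the comparison count
# (the count varies run to run because the pivot is chosen at random).
sample = [5, 3, 8, 1]
print(_in_place_quick_sort(sample, 0, len(sample) - 1), sample)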
| 48 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
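
# Illustration of create_token_type_ids_from_sequences above: a sentence pair
# gets 0s for "[CLS] A [SEP]" and 1s for "B [SEP]".
ids_a, ids_b = [7, 8], [9]
print([0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1))  # [0, 0, 0, 0, 1, 1]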
| 84 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 90 | 1 |
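
# Hedged invocation sketch for the converter above (script filename assumed):
#   python convert_original_ldm_to_diffusers.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline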
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Tuple = """summarization"""
snake_case__ : Tuple = ["""loss"""]
snake_case__ : int = ROUGE_KEYS
snake_case__ : int = """rouge2"""
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict):
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task to fine-tune on."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
UpperCAmelCase_ : Optional[int] = pl.Trainer.add_argparse_args(parser)
UpperCAmelCase_ : List[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase_ : Dict = parser.parse_args()
main(args)
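# `_step` above imports `label_smoothed_nll_loss` from the local `utils` module.
# Below is a minimal sketch of the usual fairseq-style formulation; the function
# name, signature, and details are illustrative and may differ from the repo's
# exact helper:
import torch
import torch.nn.functional as F


def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (batch, seq, vocab) log-probabilities; target: (batch, seq) token ids
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    # Blend the true-label NLL with a uniform prior over the vocabulary.
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss


lprobs = F.log_softmax(torch.randn(2, 5, 11), dim=-1)
target = torch.randint(0, 11, (2, 5))
loss, nll = label_smoothed_nll_loss_sketch(lprobs, target, epsilon=0.1)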
| 702 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
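# A small usage sketch for the config above: `attribute_map` lets generic code
# read decoder-style fields under the common names. Assumes only the class
# defined in this file; the values are illustrative.
sketch_config = TrOCRConfig(d_model=512, decoder_layers=6)
assert sketch_config.hidden_size == 512  # mapped onto d_model
assert sketch_config.num_hidden_layers == 6  # mapped onto decoder_layers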
| 590 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing optimization technique."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > -5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > -5 found via simulated annealing: {local_max.score()}"
    )

    def test_fb(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fb)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fb)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
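# The acceptance rule above is the Metropolis criterion: a worsening move is
# taken with probability e^(delta/T). A tiny standalone sketch; the function
# name and the values below are illustrative:
import math
import random


def accept_move(delta: float, temperature: float) -> bool:
    # Always accept improvements; accept worsening moves with prob e^(delta/T).
    return delta > 0 or random.random() < math.e ** (delta / temperature)


random.seed(0)
print(accept_move(-1.0, temperature=10.0))  # often True at high temperature
print(accept_move(-1.0, temperature=0.1))  # almost always False once cold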
| 664 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_supports, W_query):
        """
        Find scores of each token being start and end token for an entity.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # embed the query and the support examples with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
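# The start/end scoring above reduces to: similarity of each query token to the
# support's start/end marker tokens, summed over supports and softmaxed over the
# query sequence. A toy sketch with random tensors (shapes are illustrative):
import torch

q_i = torch.randn(12, 768)  # one query: 12 tokens, hidden size 768
s_start = torch.randn(3, 768)  # embeddings of 3 start-marker tokens from supports
p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)
assert p_start.shape == (12,) and torch.isclose(p_start.sum(), torch.tensor(1.0))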
| 664 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
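# A minimal sketch of the lazy-import pattern used above: names listed in
# `_import_structure` are only imported on first attribute access. Illustrative
# only; it uses absolute imports for the demo, whereas the real `_LazyModule`
# in `transformers.utils` resolves relative submodules and does more.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Maps attribute access to on-demand module imports."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        if item not in self._class_to_module:
            raise AttributeError(item)
        module = importlib.import_module(self._class_to_module[item])
        return getattr(module, item)


lazy = LazyModuleSketch("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'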
| 709 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
def __call__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return super().__call__(*snake_case__ , **snake_case__ )
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold
            ]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.idalabel[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
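# The LayoutLM branch above rescales 0-1000 normalized boxes back to pixels. A
# tiny live check of that arithmetic (values are illustrative):
width, height = 640, 480
bbox = [100, 250, 500, 750]  # normalized to the 0-1000 range
pixels = [width * bbox[0] / 1000, height * bbox[1] / 1000, width * bbox[2] / 1000, height * bbox[3] / 1000]
assert pixels == [64.0, 120.0, 320.0, 360.0]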
| 444 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Use Pollard's Rho algorithm to return a nontrivial factor of ``num``, or None."""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
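# A quick, deterministic self-check of the function above (636 is even, so the
# special-cased factor 2 is returned immediately; the value is illustrative):
sketch_factor = pollard_rho(636)
assert sketch_factor is not None and 636 % sketch_factor == 0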
| 444 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 134 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class a ( unittest.TestCase ):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 134 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 670 |
from manim import *
class lowercase_(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""",
            font_size=18,
        )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""",
            font_size=18,
        )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        step_a = MarkupText(
            f"""Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.""",
            font_size=24,
        )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
        first_animations = []
        second_animations = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 670 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # All the lists must be shuffled, and lists of the same size must share the same shuffling
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
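# A quick demonstration of the sharding helpers above (toy values only):
gen_kwargs_demo = {"files": ["a", "b", "c", "d", "e"], "tag": "train"}
assert _number_of_shards_in_gen_kwargs(gen_kwargs_demo) == 5
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
splits = _split_gen_kwargs(gen_kwargs_demo, max_num_jobs=2)
assert splits[0]["files"] == ["a", "b", "c"] and splits[1]["tag"] == "train"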
| 230 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
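# The filter logic above reduces to: score = cosine similarity - per-concept
# threshold (+ adjustment); any positive score flags the image. A toy numpy
# sketch of that thresholding (values are illustrative):
import numpy as np

cos_dist_demo = np.array([[0.21, 0.05], [0.02, 0.01]])  # 2 images x 2 concepts
weights_demo = np.array([0.18, 0.12])  # per-concept thresholds
scores_demo = cos_dist_demo - weights_demo  # + adjustment, here 0.0
has_concept = (scores_demo > 0).any(axis=1)
assert has_concept.tolist() == [True, False]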
| 230 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMvaConfig(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMvaOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
    def atol_for_validation(self) -> float:
        return 1e-5
@property
    def default_onnx_opset(self) -> int:
        return 12
def A__ ( self : Optional[Any], __lowercase : "ProcessorMixin", __lowercase : int = -1, __lowercase : int = -1, __lowercase : bool = False, __lowercase : Optional["TensorType"] = None, __lowercase : int = 3, __lowercase : int = 40, __lowercase : int = 40, ):
setattr(processor.image_processor, "apply_ocr", _UpperCamelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ = compute_effective_axis_dimension(
_UpperCamelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ = processor.tokenizer.num_special_tokens_to_add(_UpperCamelCase )
lowercase__ = compute_effective_axis_dimension(
_UpperCamelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=_UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowercase__ = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowercase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase__ = self._generate_dummy_images(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
lowercase__ = dict(
processor(
_UpperCamelCase, text=_UpperCamelCase, boxes=_UpperCamelCase, return_tensors=_UpperCamelCase, ) )
return inputs
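

# A minimal usage sketch (editor's addition, not part of the original module);
# it exercises only the defaults defined above and runs when the file is
# executed directly rather than imported.
if __name__ == "__main__":
    config = LayoutLMv3Config()
    assert config.model_type == "layoutlmv3"
    assert config.max_2d_position_embeddings == 1024
    assert config.coordinate_size == config.shape_size == 128
    print(f"hidden_size={config.hidden_size}, patch_size={config.patch_size}")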
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
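

# Migration note (editor's addition): as the warning above states, this class is a
# thin deprecation shim -- every argument is forwarded to `Trainer` unchanged, so
# `SageMakerTrainer(args=training_args, ...)` can be replaced one-for-one with
# `Trainer(args=training_args, ...)`.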
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        # 0 means "never wait" when dynamically loaded code asks for confirmation.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)

            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        # a model without a processor still resolves to its tokenizer
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        # a vision model without a processor resolves to its image processor
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
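

# A minimal sketch of the API exercised by the tests above (editor's addition).
# It assumes network access to the Hugging Face Hub: `AutoProcessor` resolves the
# concrete processor class from the checkpoint's config, exactly as the tests check.
if __name__ == "__main__":
    demo_processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
    print(type(demo_processor).__name__)  # Wav2Vec2Processor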
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
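

# Editor's note: opening the file in binary mode and forcing `.convert("RGB")`
# above guards against greyscale or palette images slipping into the batch with
# the wrong number of channels.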
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
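

# Example invocation (editor's addition; dataset name and output path are illustrative):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs \
#       --do_train \
#       --do_eval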
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        # Halfway through, round-trip the scheduler state through disk.
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Pickleable wrapper around a scheduler's lr lambdas."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        # Replace each lr lambda in place so the wrapped schedule is what gets pickled.
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
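

# A minimal demo of the helpers above (editor's addition). It uses only names
# already defined or imported in this module and the expected values from the
# `scheds` table: a 2-step warmup to lr=10 followed by a linear decay.
if __name__ == "__main__" and is_torch_available():
    demo_param = nn.Parameter(torch.zeros(1))
    demo_optimizer = AdamW([demo_param], lr=10.0)
    demo_scheduler = get_linear_schedule_with_warmup(
        demo_optimizer, num_warmup_steps=2, num_training_steps=10
    )
    print(unwrap_schedule(demo_scheduler, num_steps=10))
    # approximately [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]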
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfig',
    'DecisionTransformerConfig',
    'EncoderDecoderConfig',
    'RagConfig',
    'SpeechEncoderDecoderConfig',
    'VisionEncoderDecoderConfig',
    'VisionTextDualEncoderConfig',
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
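
# Quick illustration of the pattern above (editor's addition):
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# returns [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")].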
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num factorial."""
    return sum(int(digit) for digit in str(factorial(num)))
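

# Sanity check (editor's addition): 10! = 3628800 and 3+6+2+8+8+0+0 == 27.
assert solution(10) == 27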
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
def solution(limit: int = 50_000_000) -> int:
    """Count numbers below `limit` expressible as p1**2 + p2**3 + p3**4 with p1, p2, p3 prime."""
    ret = set()
    # 24 = 2**3 + 2**4 is the smallest possible cube + fourth power, so any prime
    # whose square contributes must satisfy p**2 <= limit - 24.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Iterate in increasing order so the early-exit `break`s below are sound.
    sorted_primes = sorted(primes)
    for prime_1 in sorted_primes:
        square = prime_1 * prime_1
        for prime_2 in sorted_primes:
            cube = prime_2 * prime_2 * prime_2
            # 16 = 2**4 is the smallest possible fourth power.
            if square + cube >= limit - 16:
                break
            for prime_3 in sorted_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
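

# Sanity check from the Project Euler 87 problem statement (editor's addition):
# below fifty, exactly four numbers -- 28, 33, 47 and 49 -- can be written as a
# prime square plus a prime cube plus a prime fourth power.
assert solution(50) == 4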
if __name__ == "__main__":
print(F'''{solution() = }''')
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
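

# Post-processing sketch (editor's addition): turning the logits checked above
# into a per-pixel class map is a channel-wise argmax. The checkpoint name
# mirrors the integration test; running this needs torch, vision and network access.
if __name__ == "__main__" and is_torch_available() and is_vision_available():
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, 512, 512)
    seg_map = logits.argmax(dim=1)[0]  # (512, 512) integer class ids
    print(seg_map.shape)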
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# NOTE (editor): the class name and all argument names in this excerpt were garbled,
# so neutral names are used below; the defaults and logic are unchanged.
class BicubicImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
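

# A minimal usage sketch (editor's addition; the class name is the placeholder
# chosen above, since the original name was garbled in this excerpt):
if __name__ == "__main__":
    image_processor = BicubicImageProcessor()
    dummy = (np.random.rand(300, 300, 3) * 255).astype("uint8")
    batch = image_processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)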
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
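# Editor's note: the "*" in these keys is a wildcard for the quantizer layer
# index; the conversion logic (truncated in this excerpt) substitutes the
# concrete index when renaming checkpoint weights.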
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
_lowerCAmelCase = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
_lowerCAmelCase = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
_lowerCAmelCase = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
_lowerCAmelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_lowerCAmelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
_lowerCAmelCase = []
_lowerCAmelCase = []
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : List[str] = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if weight_type is not None:
A_ : List[str] = getattr(_lowerCAmelCase ,_lowerCAmelCase ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A_ : List[Any] = value
elif weight_type == "weight_g":
A_ : int = value
elif weight_type == "weight_v":
A_ : int = value
elif weight_type == "bias":
A_ : Optional[Any] = value
elif weight_type == "running_mean":
A_ : str = value
elif weight_type == "running_var":
A_ : List[str] = value
elif weight_type == "num_batches_tracked":
A_ : Tuple = value
elif weight_type == "weight_ih_l0":
A_ : Optional[Any] = value
elif weight_type == "weight_hh_l0":
A_ : List[Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[Any] = value
elif weight_type == "bias_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "weight_ih_l1":
A_ : int = value
elif weight_type == "weight_hh_l1":
A_ : Any = value
elif weight_type == "bias_ih_l1":
A_ : List[Any] = value
elif weight_type == "bias_hh_l1":
A_ : List[Any] = value
else:
A_ : Optional[int] = value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
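# --- Hedged illustration (not in the original script): a standalone
# re-implementation, assumed equivalent to the matcher above, covering its
# three patterns: trailing ".*" prefix match, infix ".*." prefix+suffix
# match, and plain substring match.
def _matches_ignore_pattern(name, key):
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

assert _matches_ignore_pattern("decoder.model.0.conv", "decoder.*")
assert _matches_ignore_pattern("encoder.model.7.block.1.conv", "encoder.*.conv")
assert not _matches_ignore_pattern("quantizer.vq.layers.0", "decoder.*")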
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : int = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
A_ : Tuple = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : int = MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(_lowerCAmelCase ,_lowerCAmelCase ):
logger.info(f"""{name} was ignored""" )
continue
A_ : Any = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_ : Any = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : int = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split(""".""" )[-2]
A_ : Dict = mapped_key.replace("""*""" ,_lowerCAmelCase )
if "weight_g" in name:
A_ : str = """weight_g"""
elif "weight_v" in name:
A_ : Any = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Any = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Union[str, Any] = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : List[Any] = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : int = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Optional[int] = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : str = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Tuple = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : Dict = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : int = """weight"""
elif "running_mean" in name:
A_ : Optional[Any] = """running_mean"""
elif "running_var" in name:
A_ : str = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[str] = """num_batches_tracked"""
else:
A_ : Any = None
set_recursively(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,):
'''simple docstring'''
if config_path is not None:
A_ : Tuple = EncodecConfig.from_pretrained(_lowerCAmelCase )
else:
A_ : List[str] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Optional[Any] = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
        A_ : str = 64
        A_ : Optional[int] = 32000
        A_ : Dict = 2048
A_ : Optional[Any] = False
A_ : Optional[Any] = False
A_ : Dict = False
elif model_name == "encodec_48khz":
A_ : Union[str, Any] = [8, 5, 4, 2]
A_ : List[str] = [3.0, 6.0, 12.0, 24.0]
        A_ : Tuple = 48000
A_ : Tuple = 2
A_ : Tuple = False
A_ : List[str] = """time_group_norm"""
A_ : Tuple = True
A_ : str = 1.0
A_ : Dict = 0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
A_ : Union[str, Any] = EncodecModel(_lowerCAmelCase )
A_ : Dict = EncodecFeatureExtractor(
feature_size=config.audio_channels ,sampling_rate=config.sampling_rate ,chunk_length_s=config.chunk_length_s ,overlap=config.overlap ,)
feature_extractor.save_pretrained(_lowerCAmelCase )
A_ : Dict = torch.load(_lowerCAmelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : List[str] = original_checkpoint["""best_state"""]
recursively_load_weights(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the ๐ค hub."""
)
_lowerCAmelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 701 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
_lowerCAmelCase = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( _lowerCamelCase ):
a = '''retribert'''
def __init__( self , a__=30522 , a__=768 , a__=8 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.02 , a__=1E-12 , a__=True , a__=128 , a__=0 , **a__ , ):
super().__init__(pad_token_id=a__ , **a__ )
A_ : Union[str, Any] = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : Any = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Dict = type_vocab_size
A_ : List[str] = initializer_range
A_ : Optional[int] = layer_norm_eps
A_ : Optional[Any] = share_encoders
A_ : Dict = projection_dim
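# --- Hedged usage sketch (illustrative; assumes the public `transformers`
# API): configs like the one above are thin PretrainedConfig subclasses whose
# __init__ arguments become serializable attributes.
from transformers import PretrainedConfig

class _TinyConfig(PretrainedConfig):
    model_type = "tiny"

    def __init__(self, projection_dim=128, **kwargs):
        super().__init__(**kwargs)
        self.projection_dim = projection_dim

print(_TinyConfig(projection_dim=64).to_json_string())  # contains "projection_dim": 64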
| 481 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__snake_case : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Tuple:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
__snake_case : List[str] = """"""
else:
__snake_case : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : List[str] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
__snake_case : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Dict = in_proj_weight[
: config.hidden_size, :
]
__snake_case : str = in_proj_bias[: config.hidden_size]
__snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
__snake_case : str = in_proj_bias[-config.hidden_size :]
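# --- Hedged illustration (not part of the conversion): the slicing above
# assumes the fused qkv matrix stacks rows as [query; key; value]. Toy check
# with a small hidden size:
_hidden = 4
_qkv = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden, :], _qkv[_hidden : 2 * _hidden, :], _qkv[-_hidden:, :]
assert torch.equal(torch.cat([_q, _k, _v]), _qkv)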
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Dict = dct.pop(_lowerCamelCase )
__snake_case : List[Any] = val
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : Optional[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
__snake_case : List[Any] = 8
# set labels if required
if not base_model:
__snake_case : Union[str, Any] = 1000
__snake_case : str = """huggingface/label-files"""
__snake_case : List[str] = """imagenet-1k-id2label.json"""
__snake_case : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case : Optional[int] = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__snake_case : List[str] = 384
__snake_case : Optional[Any] = 1536
__snake_case : Optional[int] = 12
__snake_case : Optional[Any] = 6
# load original model from torch hub
__snake_case : List[str] = torch.hub.load("""facebookresearch/dino:main""" , _lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__snake_case : str = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
__snake_case : str = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if base_model:
__snake_case : Union[str, Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval()
else:
__snake_case : Optional[int] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
__snake_case : List[str] = ViTImageProcessor()
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Any = encoding["""pixel_values"""]
__snake_case : Optional[int] = model(_lowerCamelCase )
if base_model:
__snake_case : List[Any] = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
__snake_case : Dict = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1E-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
__UpperCamelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 26 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int = 16 , SCREAMING_SNAKE_CASE__ : int = 88 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "geglu" , SCREAMING_SNAKE_CASE__ : Optional[int] = None , ) -> Optional[int]:
super().__init__()
A : Tuple =nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=SCREAMING_SNAKE_CASE__ , attention_head_dim=SCREAMING_SNAKE_CASE__ , in_channels=SCREAMING_SNAKE_CASE__ , num_layers=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , norm_num_groups=SCREAMING_SNAKE_CASE__ , cross_attention_dim=SCREAMING_SNAKE_CASE__ , attention_bias=SCREAMING_SNAKE_CASE__ , sample_size=SCREAMING_SNAKE_CASE__ , num_vector_embeds=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , num_embeds_ada_norm=SCREAMING_SNAKE_CASE__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
A : List[Any] =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        A : str =[77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
A : Optional[int] =[1, 0]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Dict:
A : Any =hidden_states
A : int =[]
A : str =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
A : Optional[int] =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
A : str =self.transformer_index_for_condition[i]
A : str =self.transformers[transformer_index](
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , cross_attention_kwargs=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
A : str =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
A : Any =output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=SCREAMING_SNAKE_CASE__ )
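# --- Hedged numeric sketch (illustrative, standalone): the forward pass above
# blends the two transformers' residual outputs with mix_ratio r and then adds
# the input back:
#   out = (e0 - x) * r + (e1 - x) * (1 - r) + x
import torch

_x = torch.ones(2)
_e0, _e1, _r = _x + 1.0, _x + 3.0, 0.5
print((_e0 - _x) * _r + (_e1 - _x) * (1 - _r) + _x)  # tensor([3., 3.])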
| 305 | 0 |
from __future__ import annotations
class a :
def __init__( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = order
# a_{0} ... a_{k}
__SCREAMING_SNAKE_CASE: Any = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__SCREAMING_SNAKE_CASE: List[Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__SCREAMING_SNAKE_CASE: int = [0.0] * self.order
# y[n-1] ... y[n-k]
__SCREAMING_SNAKE_CASE: Any = [0.0] * self.order
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
if len(_lowerCAmelCase ) < self.order:
__SCREAMING_SNAKE_CASE: Tuple = [1.0, *a_coeffs]
if len(_lowerCAmelCase ) != self.order + 1:
__SCREAMING_SNAKE_CASE: Any = (
f"""Expected a_coeffs to have {self.order + 1} elements """
f"""for {self.order}-order filter, got {len(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
if len(_lowerCAmelCase ) != self.order + 1:
__SCREAMING_SNAKE_CASE: Union[str, Any] = (
f"""Expected b_coeffs to have {self.order + 1} elements """
f"""for {self.order}-order filter, got {len(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = a_coeffs
__SCREAMING_SNAKE_CASE: List[str] = b_coeffs
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__SCREAMING_SNAKE_CASE: str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__SCREAMING_SNAKE_CASE: str = self.input_history[:-1]
__SCREAMING_SNAKE_CASE: List[str] = self.output_history[:-1]
__SCREAMING_SNAKE_CASE: Any = sample
__SCREAMING_SNAKE_CASE: str = result
return result
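# --- Hedged worked example (illustrative, standalone): the class above
# implements the direct-form difference equation
#   a0 * y[n] = b0 * x[n] + sum_i (b_i * x[n-i] - a_i * y[n-i]).
# A first-order averaging filter with assumed coefficients b = (0.5, 0.5),
# a = (1.0, 0.0) settles at a constant input's value:
def _iir_step(x_n, x_prev, y_prev, b=(0.5, 0.5), a=(1.0, 0.0)):
    return (b[0] * x_n + b[1] * x_prev - a[1] * y_prev) / a[0]

_x_prev = _y_prev = 0.0
for _x_n in (1.0, 1.0, 1.0):
    _y_prev = _iir_step(_x_n, _x_prev, _y_prev)
    _x_prev = _x_n
print(_y_prev)  # 1.0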
| 713 |
from math import isclose, sqrt
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> tuple[float, float, float]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = point_y / 4 / point_x
__SCREAMING_SNAKE_CASE: Optional[Any] = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__SCREAMING_SNAKE_CASE: Optional[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__SCREAMING_SNAKE_CASE: Dict = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__SCREAMING_SNAKE_CASE: Dict = outgoing_gradient**2 + 4
__SCREAMING_SNAKE_CASE: Union[str, Any] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__SCREAMING_SNAKE_CASE: Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
__SCREAMING_SNAKE_CASE: Optional[int] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__SCREAMING_SNAKE_CASE: Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__SCREAMING_SNAKE_CASE: Union[str, Any] = x_minus if isclose(UpperCamelCase__ , UpperCamelCase__ ) else x_plus
__SCREAMING_SNAKE_CASE: Any = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowerCAmelCase ( UpperCamelCase__ : float = 1.4 , UpperCamelCase__ : float = -9.6 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = 0
__SCREAMING_SNAKE_CASE: float = first_x_coord
__SCREAMING_SNAKE_CASE: float = first_y_coord
__SCREAMING_SNAKE_CASE: float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: List[Any] = next_point(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
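# --- Hedged sanity check (illustrative, standalone): the quadratic solved in
# the first helper keeps every bounce on the ellipse 4x^2 + y^2 = 100.
# Re-deriving one bounce from the entry point (1.4, -9.6):
_m_in = (10.1 - (-9.6)) / (0.0 - 1.4)  # incoming gradient of the first beam
_m_norm = -9.6 / 4 / 1.4  # normal gradient at the point
_sa = 2 * _m_norm / (1 + _m_norm * _m_norm)
_ca = (1 - _m_norm * _m_norm) / (1 + _m_norm * _m_norm)
_m_out = (_sa - _ca * _m_in) / (_ca + _sa * _m_in)  # reflected gradient
_qa = _m_out**2 + 4
_qb = 2 * _m_out * (-9.6 - _m_out * 1.4)
_qc = (-9.6 - _m_out * 1.4) ** 2 - 100
_x_new = (-_qb - sqrt(_qb**2 - 4 * _qa * _qc)) / (2 * _qa)
if isclose(_x_new, 1.4):
    _x_new = (-_qb + sqrt(_qb**2 - 4 * _qa * _qc)) / (2 * _qa)
_y_new = -9.6 + _m_out * (_x_new - 1.4)
assert isclose(4 * _x_new * _x_new + _y_new * _y_new, 100.0, abs_tol=1e-6)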
| 146 | 0 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case_ = str(bin(SCREAMING_SNAKE_CASE__ ) )
binary_number += "0" * shift_amount
return binary_number
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case_ = str(bin(SCREAMING_SNAKE_CASE__ ) )[2:]
if shift_amount >= len(SCREAMING_SNAKE_CASE__ ):
return "0b0"
snake_case_ = binary_number[: len(SCREAMING_SNAKE_CASE__ ) - shift_amount]
return "0b" + shifted_binary_number
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number >= 0: # Get binary representation of positive number
snake_case_ = '''0''' + str(bin(SCREAMING_SNAKE_CASE__ ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
snake_case_ = len(bin(SCREAMING_SNAKE_CASE__ )[3:] ) # Find 2's complement of number
snake_case_ = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
snake_case_ = (
'''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ )) + binary_number
)
if shift_amount >= len(SCREAMING_SNAKE_CASE__ ):
return "0b" + binary_number[0] * len(SCREAMING_SNAKE_CASE__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(SCREAMING_SNAKE_CASE__ ) - shift_amount]
)
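# --- Hedged cross-check (illustrative, standalone): for non-negative inputs
# the string-based shifts above agree with Python's native operators.
for _n, _k in [(13, 1), (5, 3), (255, 4)]:
    assert int(bin(_n)[2:] + "0" * _k, 2) == _n << _k  # logical left shift
    assert int("0" + bin(_n)[2:][: -_k or None], 2) == _n >> _k  # logical right shift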
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 39 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Dict , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _UpperCamelCase , )
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 39 | 1 |
import unittest
from knapsack import knapsack as k
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __a ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = 0
lowercase__ : int = [0]
lowercase__ : Optional[Any] = [0]
lowercase__ : Optional[int] = len(lowerCamelCase )
self.assertEqual(k.knapsack(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) , 0 )
lowercase__ : Any = [60]
lowercase__ : Dict = [10]
lowercase__ : Any = len(lowerCamelCase )
self.assertEqual(k.knapsack(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) , 0 )
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : Any = 3
lowercase__ : Union[str, Any] = [1, 2, 3]
lowercase__ : Dict = [3, 2, 1]
lowercase__ : List[Any] = len(lowerCamelCase )
self.assertEqual(k.knapsack(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) , 5 )
def __a ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = 50
lowercase__ : int = [60, 100, 120]
lowercase__ : Optional[Any] = [10, 20, 30]
lowercase__ : List[str] = len(lowerCamelCase )
self.assertEqual(k.knapsack(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) , 220 )
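# --- Hedged reference implementation (illustrative, standalone): the 0/1
# knapsack recurrence the tests above exercise can be written as:
def _knapsack(capacity, weights, values, n):
    if n == 0 or capacity == 0:
        return 0
    if weights[n - 1] > capacity:
        return _knapsack(capacity, weights, values, n - 1)
    return max(
        values[n - 1] + _knapsack(capacity - weights[n - 1], weights, values, n - 1),
        _knapsack(capacity, weights, values, n - 1),
    )

assert _knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220  # matches the test above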
if __name__ == "__main__":
    unittest.main()
| 705 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : str = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Optional[int] = tokenizer("Hello there" , return_tensors="np" ).input_ids
lowercase__ : Optional[Any] = tokenizer("Hi I am" , return_tensors="np" ).input_ids
lowercase__ : int = shift_tokens_right(lowerCamelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowercase__ : List[Any] = model(lowerCamelCase , decoder_input_ids=lowerCamelCase ).logits
lowercase__ : Any = optax.softmax_cross_entropy(lowerCamelCase , onehot(lowerCamelCase , logits.shape[-1] ) ).mean()
lowercase__ : Union[str, Any] = -(labels.shape[-1] * loss.item())
        lowercase__ : Any = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 298 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCAmelCase: Optional[int] = logging.getLogger(__name__)
def _lowercase( __a : Dict , __a : List[Any] ):
a__ =np.argmax(__a , axis=1 )
return np.sum(outputs == labels )
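# --- Hedged demo (illustrative): the helper above counts argmax matches.
_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
_labels = np.array([1, 0, 0])
print(np.sum(np.argmax(_logits, axis=1) == _labels))  # 2 correct predictions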
def _lowercase( __a : Union[str, Any] ):
with open(__a , encoding='utf_8' ) as f:
a__ =csv.reader(__a )
a__ =[]
next(__a ) # skip the first line
for line in tqdm(__a ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _lowercase( __a : int , __a : List[Any] , __a : Dict , __a : Tuple , __a : List[Any] , __a : Tuple ):
a__ =[]
for dataset in encoded_datasets:
a__ =len(__a )
a__ =np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
a__ =np.zeros((n_batch, 2) , dtype=np.intaa )
a__ =np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
a__ =np.zeros((n_batch,) , dtype=np.intaa )
        for i, (story, conta, conta, mc_label) in enumerate(__a ):
a__ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a__ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a__ =with_conta
a__ =with_conta
a__ =len(__a ) - 1
a__ =len(__a ) - 1
a__ =with_conta
a__ =with_conta
a__ =mc_label
a__ =(input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(__a ) for t in all_inputs ) )
return tensor_datasets
def _lowercase( ):
a__ =argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__a , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=__a , type=__a , required=__a , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=__a , default='' )
parser.add_argument('--eval_dataset' , type=__a , default='' )
parser.add_argument('--seed' , type=__a , default=42 )
parser.add_argument('--num_train_epochs' , type=__a , default=3 )
parser.add_argument('--train_batch_size' , type=__a , default=8 )
parser.add_argument('--eval_batch_size' , type=__a , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=__a , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=__a , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=__a , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=__a , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=__a , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=__a , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=__a , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=__a , default=0.01 )
parser.add_argument('--lm_coef' , type=__a , default=0.9 )
parser.add_argument('--n_valid' , type=__a , default=374 )
parser.add_argument('--server_ip' , type=__a , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__a , default='' , help='Can be used for distant debugging.' )
a__ =parser.parse_args()
print(__a )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
a__ =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
a__ =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(__a , __a ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
a__ =['_start_', '_delimiter_', '_classify_']
a__ =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
a__ =tokenizer.convert_tokens_to_ids(__a )
a__ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
# Load and encode the datasets
def tokenize_and_encode(__a : Any ):
if isinstance(__a , __a ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) )
elif isinstance(__a , __a ):
return obj
return [tokenize_and_encode(__a ) for o in obj]
logger.info('Encoding dataset...' )
a__ =load_rocstories_dataset(args.train_dataset )
a__ =load_rocstories_dataset(args.eval_dataset )
a__ =(train_dataset, eval_dataset)
a__ =tokenize_and_encode(__a )
# Compute the max input length for the Transformer
a__ =model.config.n_positions // 2 - 2
a__ =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
a__ =min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
a__ =pre_process_datasets(__a , __a , __a , *__a )
a__ , a__ =tensor_datasets[0], tensor_datasets[1]
a__ =TensorDataset(*__a )
a__ =RandomSampler(__a )
a__ =DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
a__ =TensorDataset(*__a )
a__ =SequentialSampler(__a )
a__ =DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
a__ =args.max_steps
a__ =args.max_steps // (len(__a ) // args.gradient_accumulation_steps) + 1
else:
a__ =len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs
a__ =list(model.named_parameters() )
a__ =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
a__ =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
a__ =AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon )
a__ =get_linear_schedule_with_warmup(
__a , num_warmup_steps=args.warmup_steps , num_training_steps=__a )
if args.do_train:
a__ , a__ , a__ =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
a__ =0
a__ =0
a__ =tqdm(__a , desc='Training' )
for step, batch in enumerate(__a ):
a__ =tuple(t.to(__a ) for t in batch )
a__ , a__ , a__ , a__ =batch
a__ =model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
a__ =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
a__ =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
a__ ='Training loss: {:.2e} lr: {:.2e}'.format(__a , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
a__ =model.module if hasattr(__a , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
a__ =os.path.join(args.output_dir , __a )
a__ =os.path.join(args.output_dir , __a )
torch.save(model_to_save.state_dict() , __a )
model_to_save.config.to_json_file(__a )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
a__ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
a__ =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__a )
if args.do_eval:
model.eval()
a__ , a__ =0, 0
a__ , a__ =0, 0
for batch in tqdm(__a , desc='Evaluating' ):
a__ =tuple(t.to(__a ) for t in batch )
a__ , a__ , a__ , a__ =batch
with torch.no_grad():
a__ , a__ , a__ , a__ =model(
__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
a__ =mc_logits.detach().cpu().numpy()
a__ =mc_labels.to('cpu' ).numpy()
a__ =accuracy(__a , __a )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
a__ =eval_loss / nb_eval_steps
a__ =eval_accuracy / nb_eval_examples
a__ =tr_loss / nb_tr_steps if args.do_train else None
a__ ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
a__ =os.path.join(args.output_dir , 'eval_results.txt' )
with open(__a , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __a , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 20 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>โ</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>โ</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 1 |
'''simple docstring'''
__magic_name__ ={
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
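# --- Hedged usage sketch (illustrative): every value above is a full pip
# requirement specifier; a consumer can parse and check one with `packaging`:
from packaging.requirements import Requirement

_req = Requirement("tokenizers>=0.11.1,!=0.11.3,<0.14")
print(_req.name, _req.specifier.contains("0.13.3"))  # tokenizers True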
| 715 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__magic_name__ =logging.get_logger(__name__)
__magic_name__ ='''▁'''
__magic_name__ ={'''vocab_file''': '''sentencepiece.bpe.model'''}
__magic_name__ ={
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
__magic_name__ ={
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__magic_name__ =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] =["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ : List[int] =[]
SCREAMING_SNAKE_CASE_ : List[int] =[]
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> str:
'''simple docstring'''
UpperCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase__ = legacy_behaviour
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase__ = 1
UpperCamelCase__ = len(self.sp_model )
UpperCamelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE_ )
}
UpperCamelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase__ = src_lang if src_lang is not None else '''eng_Latn'''
UpperCamelCase__ = self.lang_code_to_id[self._src_lang]
UpperCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
UpperCamelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__(self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a (self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a (self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _a (self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
UpperCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipelines to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        # SPIECE_UNDERLINE is the module-level SentencePiece word-boundary marker defined alongside this tokenizer
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
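
# --- Illustrative sketch (not part of the original file) --------------------
# The fairseq/SPM alignment above boils down to: SentencePiece id s maps to
# fairseq id s + fairseq_offset, with ids 0-3 reserved for <s>/<pad>/</s>/<unk>.
# A minimal standalone demonstration of that offset arithmetic:
if __name__ == "__main__":
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1

    def spm_to_fairseq(spm_id: int) -> int:
        # spm id 0 is <unk>; every other id shifts up by the offset
        return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

    assert spm_to_fairseq(3) == 4  # first "real" spm token lands at fairseq position 4
    assert spm_to_fairseq(0) == 3  # spm <unk> maps to the reserved fairseq <unk> id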
| 469 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} ,)
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' ,candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] ,multi_label=True ,)
self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} ,)
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} ,)
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' ,candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] ,multi_label=True ,)
self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} ,)
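
# --- Usage sketch (illustrative, not part of the test suite) ----------------
# How the pipeline under test is typically driven; the checkpoint and labels
# below are examples chosen for this sketch.
if __name__ == "__main__":
    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier(
        "I love hiking in the mountains",
        candidate_labels=["outdoors", "finance"],
        hypothesis_template="This example is about {}.",
    )
    print(result["labels"][0], result["scores"][0])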
| 647 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime using trial division in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
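
# Illustrative helper (not in the original module): collect primes below a
# bound with the 6k +/- 1 trial division above.
def primes_below(limit: int) -> list:
    return [n for n in range(2, limit) if is_prime(n)]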
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))
    def test_not_primes(self):
        # negative inputs violate the positivity assert in is_prime
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 647 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
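
# --- Usage sketch (illustrative, not part of the original file) -------------
# Configs are plain constructor calls; a smaller variant for quick experiments:
if __name__ == "__main__":
    tiny = RobertaPreLayerNormConfig(
        num_hidden_layers=2, hidden_size=128, num_attention_heads=2, intermediate_size=256
    )
    print(tiny.model_type, tiny.num_hidden_layers)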
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
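
# Quick illustration (not in the original module) of the shapes make_batched
# normalizes to; numpy arrays count as valid images here.
if __name__ == "__main__":
    frame = np.zeros((8, 8, 3), dtype=np.uint8)
    clip = [frame, frame]
    assert len(make_batched(frame)) == 1 and len(make_batched(frame)[0]) == 1  # image -> [[image]]
    assert len(make_batched(clip)) == 1  # one clip -> a batch with one video
    assert len(make_batched([clip, clip])) == 2  # a list of clips is already a batch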
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) | 268 | 0 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
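
# A related sketch (not in the original script): list each visible GPU by name.
def list_gpus() -> None:
    for i in range(torch.cuda.device_count()):
        print(i, torch.cuda.get_device_name(i))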
if __name__ == "__main__":
main()
| 80 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
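
# --- Usage sketch (illustrative; the checkpoint and image URL are examples) --
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    inputs = processor(images=image, text="a photo of", return_tensors="pt")
    print(inputs.keys())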
| 98 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
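
# Mini demonstration (illustrative, standalone) of the flatten/unflatten round
# trip the converter relies on:
if __name__ == "__main__":
    nested = {"encoder": {"layer_0": {"kernel": 1}}}
    flat = flatten_dict(nested)  # {("encoder", "layer_0", "kernel"): 1}
    assert unflatten_dict(flat) == nested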
| 367 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
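
# Illustrative usage (coordinates are sample values, not from this module):
def _demo() -> None:
    san_francisco = (37.774856, -122.424227)
    yosemite = (37.864742, -119.537521)
    # both results are in meters; they differ because Lambert corrects for flattening
    print(haversine_distance(*san_francisco, *yosemite))
    print(lamberts_ellipsoidal_distance(*san_francisco, *yosemite))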
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 | 1 |
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return x such that (a * x) % m == 1, via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
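
# Quick sanity checks (illustrative): 3 * 4 == 12 == 1 (mod 11), and
# 7 * 15 == 105 == 1 (mod 26).
if __name__ == "__main__":
    assert mod_inverse(3, 11) == 4
    assert mod_inverse(7, 26) == 15
    print("mod_inverse sanity checks passed")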
| 374 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # `root` defaults to the working directory; the call site below only passes the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # _download returns the raw checkpoint bytes, so deserialize them here
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
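
# Example invocation (illustrative; paths and model size are placeholders):
#   python convert_openai_whisper_to_tfms.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny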
| 648 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
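
# Sketch (illustrative, not part of the tests) of the color quantization behind
# ImageGPT inputs: each pixel is replaced by the id of its nearest cluster
# center, which is what produces the `input_ids` checked above.
def nearest_cluster_ids(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) floats scaled to [-1, 1]; clusters: (k, 3)
    # squared Euclidean distance to every cluster, then argmin over clusters
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)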
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
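
# Usage sketch (illustrative): rope_scaling expects exactly {"type", "factor"},
# with type in {"linear", "dynamic"} and factor a float > 1.
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)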
| 604 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
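# Illustrative sketch (not from the original module): generating dummy ONNX
# inputs for a small config. Requires PyTorch and a CodeGen tokenizer.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(CodeGenConfig(n_layer=2, n_head=4), use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   print(list(dummy.keys()))  # ['input_ids', 'past_key_values', 'attention_mask']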
| 81 |
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of two subsets of ``arr``."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True if a subset of the first i elements sums to exactly j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]

    # A sum of 0 is always achievable (the empty subset), including for i == 0.
    for i in range(n + 1):
        dp[i][0] = True

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]

    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
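# Quick check (illustrative, not in the source): [1, 6, 11, 5] splits into
# {1, 5, 6} and {11}, giving sums 12 and 11, so the minimum difference is 1.
if __name__ == "__main__":
    assert find_min([1, 6, 11, 5]) == 1
    assert find_min([5, 5, 5, 5]) == 0
    print("find_min checks passed")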
| 176 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case', CASES )
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, 'test_file.py' )
    with open(tmp_file_path, 'w' ) as _tmp_file:
        _tmp_file.write(case )

    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
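# Illustrative extra case (not in the original test file): the same parser can
# be exercised on a single snippet without parametrization; imports guarded by
# try/except are dropped, so only "os" survives.
def test_single_case_directly(tmp_path):
    tmp_file_path = os.path.join(tmp_path, 'direct.py' )
    with open(tmp_file_path, 'w' ) as _tmp_file:
        _tmp_file.write(TOP_LEVEL_TRY_IMPORT )
    assert get_imports(tmp_file_path ) == ["os"]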
| 720 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
| 309 | 0 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 38 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic Heap class, usable as a min- or max-heap by passing a key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required to swap two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item of the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns and removes the top item of the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
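# Illustrative usage (not from the original module): with the default key and
# the comparison in `_cmp`, larger values bubble to the top (a max-heap).
#
#   h = Heap()
#   h.insert_item(5, 34)
#   h.insert_item(6, 31)
#   h.insert_item(7, 37)
#   h.get_top()       # [7, 37]
#   h.extract_top()   # [7, 37]
#   h.get_top()       # [5, 34]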
| 275 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = 'xlm-roberta-xl'

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
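# Illustrative sketch (not from the original module): the ONNX config above only
# declares dynamic axes for `input_ids` and `attention_mask`.
#
#   onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig(), task="default")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])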
| 712 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count the reversible numbers of the given length (see the check below)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 205 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
unet = UNet2DConditionModel(
    block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
scheduler = DDIMScheduler()
torch.manual_seed(0 )
vae = AutoencoderKL(
    block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
    bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any=0 ):
'''simple docstring'''
lowercase : str =torch.manual_seed(UpperCAmelCase__ )
lowercase : int ={
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] =self.get_dummy_components()
lowercase : Union[str, Any] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : Optional[Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[str] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : str =sd_pipe(**UpperCAmelCase__ ).images
lowercase : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : int =np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Any =self.get_dummy_components()
lowercase : int =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : List[Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Dict =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Tuple ='''french fries'''
lowercase : Union[str, Any] =sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
lowercase : Tuple =output.images
lowercase : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Union[str, Any] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Tuple =self.get_dummy_components()
lowercase : Tuple =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[str] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : int =sd_pipe(**UpperCAmelCase__ , view_batch_size=2 )
lowercase : Optional[int] =output.images
lowercase : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : List[str] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : int ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str =self.get_dummy_components()
lowercase : str =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowercase : Optional[int] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : Tuple =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : int =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Tuple =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Optional[int] =np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : int =self.get_dummy_components()
lowercase : Optional[int] =PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , skip_prk_steps=UpperCAmelCase__ )
lowercase : Optional[int] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : int =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Union[str, Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Dict =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Optional[int] =np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase : Dict =torch.manual_seed(UpperCAmelCase__ )
lowercase : Optional[int] ={
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Union[str, Any] ='''stabilityai/stable-diffusion-2-base'''
lowercase : Tuple =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : Optional[Any] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : List[Any] =self.get_inputs()
lowercase : List[Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Union[str, Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase : str =np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=UpperCAmelCase__ )
lowercase : List[str] =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[Any] =self.get_inputs()
lowercase : List[Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Any =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase : Tuple =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Any =0
def callback_fn(UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor ) -> None:
lowercase : Optional[Any] =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase : Any =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase : Tuple =latents[0, -3:, -3:, -1]
lowercase : List[str] =np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase : Union[str, Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase : Any =latents[0, -3:, -3:, -1]
lowercase : int =np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase : Union[str, Any] =False
lowercase : Any ='''stabilityai/stable-diffusion-2-base'''
lowercase : List[Any] =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : Any =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
lowercase : Union[str, Any] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Tuple =self.get_inputs()
pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Optional[Any] ='''stabilityai/stable-diffusion-2-base'''
lowercase : str =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : Optional[int] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
lowercase : str =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : str =self.get_inputs()
lowercase : Tuple =pipe(**UpperCAmelCase__ )
lowercase : Optional[int] =torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 92 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase__ =logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
def __init__( self : Optional[int] , A_ : GlueDataTrainingArguments , A_ : PreTrainedTokenizerBase , A_ : Optional[int] = None , A_ : Union[str, Split] = Split.train , A_ : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the ๐ค Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , A_ , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(A_ , A_ ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(A_ ):
if os.path.exists(A_ ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(A_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(F'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
A_ , A_ , max_length=args.max_seq_length , label_list=A_ , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , A_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : int ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Optional[Any] , A_ : Union[str, Any] ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self.label_list
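# Illustrative usage sketch (not from the original module); assumes a local
# GLUE data directory prepared for MRPC:
#
#   from transformers import BertTokenizer
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = GlueDataset(args, tokenizer, mode="train")
#   print(len(train_dataset), train_dataset[0])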
| 616 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
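# Quick check (illustrative): for uniform logits the softmax is uniform, so the
# entropy equals log(n), e.g. entropy(torch.zeros(1, 4)) ~= log(4) ~= 1.3863.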
class DeeBertEncoder(nn.Module):
def __init__( self : str , _UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
super().__init__()
__lowercase = config.output_attentions
__lowercase = config.output_hidden_states
__lowercase = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__lowercase = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__lowercase = [-1 for _ in range(config.num_hidden_layers )]
def a__ ( self : List[Any] , _UpperCAmelCase : Dict ) -> str:
"""simple docstring"""
if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__lowercase = x
else:
__lowercase = x
def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def a__ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , ) -> List[str]:
"""simple docstring"""
__lowercase = ()
__lowercase = ()
__lowercase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__lowercase = all_hidden_states + (hidden_states,)
__lowercase = layer_module(
_UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = layer_outputs[0]
if self.output_attentions:
__lowercase = all_attentions + (layer_outputs[1],)
__lowercase = (hidden_states,)
if self.output_hidden_states:
__lowercase = current_outputs + (all_hidden_states,)
if self.output_attentions:
__lowercase = current_outputs + (all_attentions,)
__lowercase = self.highway[i](_UpperCAmelCase )
# logits, pooled_output
if not self.training:
__lowercase = highway_exit[0]
__lowercase = entropy(_UpperCAmelCase )
__lowercase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__lowercase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__lowercase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_UpperCAmelCase , i + 1 )
else:
__lowercase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__lowercase = all_hidden_states + (hidden_states,)
__lowercase = (hidden_states,)
if self.output_hidden_states:
__lowercase = outputs + (all_hidden_states,)
if self.output_attentions:
__lowercase = outputs + (all_attentions,)
__lowercase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__( self : str , _UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config
__lowercase = BertEmbeddings(_UpperCAmelCase )
__lowercase = DeeBertEncoder(_UpperCAmelCase )
__lowercase = BertPooler(_UpperCAmelCase )
self.init_weights()
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
self.encoder.init_highway_pooler(self.pooler )
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return self.embeddings.word_embeddings
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = value
def a__ ( self : Optional[Any] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def a__ ( self : Tuple , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__lowercase = input_ids.size()
elif inputs_embeds is not None:
__lowercase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__lowercase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowercase = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if encoder_attention_mask is None:
__lowercase = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__lowercase = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowercase = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowercase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowercase = encoder_attention_mask[:, None, None, :]
__lowercase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__lowercase = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowercase = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__lowercase = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__lowercase = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__lowercase = encoder_outputs[0]
__lowercase = self.pooler(_UpperCAmelCase )
__lowercase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config.num_labels
__lowercase = config.num_hidden_layers
__lowercase = DeeBertModel(_UpperCAmelCase )
__lowercase = nn.Dropout(config.hidden_dropout_prob )
__lowercase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Tuple=-1 , _UpperCAmelCase : Dict=False , ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.num_layers
try:
__lowercase = self.bert(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowercase = outputs[1]
__lowercase = self.dropout(_UpperCAmelCase )
__lowercase = self.classifier(_UpperCAmelCase )
__lowercase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowercase = e.message
__lowercase = e.exit_layer
__lowercase = outputs[0]
if not self.training:
__lowercase = entropy(_UpperCAmelCase )
__lowercase = []
__lowercase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowercase = MSELoss()
__lowercase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowercase = []
for highway_exit in outputs[-1]:
__lowercase = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowercase = MSELoss()
__lowercase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_UpperCAmelCase )
if train_highway:
__lowercase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowercase = (loss,) + outputs
if not self.training:
__lowercase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowercase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 688 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
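# Example invocation once installed (the `env` subcommand is provided by the
# EnvironmentCommand registered above):
#
#   $ diffusers-cli env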
| 688 | 1 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
super().__init__(*_A ,**_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_lowerCAmelCase : List[str] = None
if self.model.config.prefix is not None:
_lowerCAmelCase : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_lowerCAmelCase : str = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self._sanitize_parameters(prefix=_A ,**self._forward_params )
_lowerCAmelCase : Union[str, Any] = {**self._preprocess_params, **preprocess_params}
_lowerCAmelCase : Any = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = {}
if prefix is not None:
_lowerCAmelCase : Dict = prefix
if prefix:
_lowerCAmelCase : Optional[int] = self.tokenizer(
_A ,padding=_A ,add_special_tokens=_A ,return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
' [None, \'hole\']' )
_lowerCAmelCase : Optional[Any] = handle_long_generation
preprocess_params.update(_A )
_lowerCAmelCase : int = generate_kwargs
_lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_lowerCAmelCase : Tuple = ReturnType.TENSORS
if return_type is not None:
_lowerCAmelCase : Union[str, Any] = return_type
if clean_up_tokenization_spaces is not None:
_lowerCAmelCase : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowerCAmelCase : Optional[int] = self.tokenizer.encode(_A ,add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_lowerCAmelCase : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self ,*_A ,**_A ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_A ,**_A )
def __call__( self ,_A ,**_A ):
'''simple docstring'''
return super().__call__(_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A="" ,_A=None ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer(
prefix + prompt_text ,padding=_A ,add_special_tokens=_A ,return_tensors=self.framework )
_lowerCAmelCase : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_lowerCAmelCase : Optional[Any] = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_lowerCAmelCase : List[Any] = generate_kwargs['max_new_tokens']
else:
_lowerCAmelCase : Any = generate_kwargs.get('max_length' ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_lowerCAmelCase : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_lowerCAmelCase : str = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_lowerCAmelCase : int = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = model_inputs['input_ids']
_lowerCAmelCase : int = model_inputs.get('attention_mask' ,_A )
# Allow empty prompts
if input_ids.shape[1] == 0:
_lowerCAmelCase : Dict = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Dict = 1
else:
_lowerCAmelCase : Optional[int] = input_ids.shape[0]
_lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_lowerCAmelCase : Union[str, Any] = generate_kwargs.pop('prefix_length' ,0 )
if prefix_length > 0:
_lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_lowerCAmelCase : List[Any] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_lowerCAmelCase : Tuple = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_lowerCAmelCase : int = self.model.generate(input_ids=_A ,attention_mask=_A ,**_A )
_lowerCAmelCase : Dict = generated_sequence.shape[0]
if self.framework == "pt":
_lowerCAmelCase : Tuple = generated_sequence.reshape(_A ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
_lowerCAmelCase : str = tf.reshape(_A ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self ,_A ,_A=ReturnType.FULL_TEXT ,_A=True ):
'''simple docstring'''
_lowerCAmelCase : List[str] = model_outputs['generated_sequence'][0]
_lowerCAmelCase : Tuple = model_outputs['input_ids']
_lowerCAmelCase : Optional[int] = model_outputs['prompt_text']
_lowerCAmelCase : List[Any] = generated_sequence.numpy().tolist()
_lowerCAmelCase : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_lowerCAmelCase : Optional[Any] = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_lowerCAmelCase : Optional[int] = self.tokenizer.decode(
_A ,skip_special_tokens=_A ,clean_up_tokenization_spaces=_A ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_lowerCAmelCase : Dict = 0
else:
_lowerCAmelCase : Dict = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=_A ,clean_up_tokenization_spaces=_A ,) )
if return_type == ReturnType.FULL_TEXT:
_lowerCAmelCase : str = prompt_text + text[prompt_length:]
else:
_lowerCAmelCase : Optional[int] = text[prompt_length:]
_lowerCAmelCase : Optional[Any] = {'generated_text': all_text}
records.append(_A )
return records
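# Illustrative usage (not from the original module): this class backs the
# `pipeline("text-generation")` factory.
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=10)[0]["generated_text"])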
| 259 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main )

    def test_ops(self):
        debug_launcher(test_ops.main )
| 259 | 1 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = 'data2vec-audio'
def __init__(self : str , a__ : int=32 , a__ : Union[str, Any]=768 , a__ : Tuple=12 , a__ : Union[str, Any]=12 , a__ : Dict=3072 , a__ : int="gelu" , a__ : Optional[int]=0.1 , a__ : int=0.1 , a__ : Dict=0.1 , a__ : int=0.0 , a__ : Tuple=0.1 , a__ : str=0.1 , a__ : Union[str, Any]=0.0_2 , a__ : int=1E-5 , a__ : List[str]="gelu" , a__ : int=(512, 512, 512, 512, 512, 512, 512) , a__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , a__ : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , a__ : Tuple=False , a__ : Optional[int]=16 , a__ : Optional[int]=19 , a__ : List[str]=5 , a__ : Dict=0.0_5 , a__ : Dict=10 , a__ : Dict=2 , a__ : Tuple=0.0 , a__ : List[str]=10 , a__ : str=0 , a__ : Union[str, Any]="sum" , a__ : List[str]=False , a__ : Any=False , a__ : int=256 , a__ : Dict=(512, 512, 512, 512, 1500) , a__ : Union[str, Any]=(5, 3, 3, 1, 1) , a__ : int=(1, 2, 3, 1, 1) , a__ : Union[str, Any]=512 , a__ : Optional[Any]=0 , a__ : Any=1 , a__ : Dict=2 , a__ : Tuple=False , a__ : Dict=3 , a__ : Union[str, Any]=2 , a__ : str=3 , a__ : Optional[int]=None , **a__ : Tuple , ):
"""simple docstring"""
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
__snake_case = hidden_size
__snake_case = feat_extract_activation
__snake_case = list(_a )
__snake_case = list(_a )
__snake_case = list(_a )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = conv_pos_kernel_size
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
__snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(_a )
__snake_case = list(_a )
__snake_case = list(_a )
__snake_case = xvector_output_dim
@property
def a (self : Dict ):
"""simple docstring"""
return math.prod(self.conv_stride )
| 710 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : int = StableDiffusionDiffEditPipeline
A_ : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
A_ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
A_ : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ : List[Any] = frozenset([] )
def a (self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
__snake_case = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=a__ , set_alpha_to_one=a__ , )
__snake_case = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=a__ , set_alpha_to_zero=a__ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
__snake_case = CLIPTextModel(a__ )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a (self : Any , a__ : Optional[Any] , a__ : List[str]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 16, 16) , rng=random.Random(a__ ) ).to(a__ )
__snake_case = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a__ ) ).to(a__ )
if str(a__ ).startswith('''mps''' ):
__snake_case = torch.manual_seed(a__ )
else:
__snake_case = torch.Generator(device=a__ ).manual_seed(a__ )
__snake_case = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def a (self : int , a__ : Optional[Any] , a__ : Optional[Any]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' )
if str(a__ ).startswith('''mps''' ):
__snake_case = torch.manual_seed(a__ )
else:
__snake_case = torch.Generator(device=a__ ).manual_seed(a__ )
__snake_case = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def a (self : List[Any] , a__ : Dict , a__ : Any=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' )
if str(a__ ).startswith('''mps''' ):
__snake_case = torch.manual_seed(a__ )
else:
__snake_case = torch.Generator(device=a__ ).manual_seed(a__ )
__snake_case = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def a (self : List[Any] ):
"""simple docstring"""
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a__ , a__ , a__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
__snake_case = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a__ , a__ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = pipe_loaded(**a__ )[0]
__snake_case = np.abs(output - output_loaded ).max()
self.assertLess(a__ , 1E-4 )
def a (self : str ):
"""simple docstring"""
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_mask_inputs(a__ )
__snake_case = pipe.generate_mask(**a__ )
__snake_case = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__snake_case = np.array([0] * 9 )
__snake_case = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inversion_inputs(a__ )
__snake_case = pipe.invert(**a__ ).images
__snake_case = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__snake_case = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
__snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1E-3 )
def a (self : Optional[int] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = {'''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''beta_schedule''': '''scaled_linear'''}
__snake_case = DPMSolverMultistepScheduler(**a__ )
__snake_case = DPMSolverMultistepInverseScheduler(**a__ )
__snake_case = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inversion_inputs(a__ )
__snake_case = pipe.invert(**a__ ).images
__snake_case = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__snake_case = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
__snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1E-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def a (cls : Dict ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
__snake_case = raw_image.convert('''RGB''' ).resize((768, 768) )
__snake_case = raw_image
def a (self : List[str] ):
"""simple docstring"""
__snake_case = torch.manual_seed(0 )
__snake_case = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=a__ , torch_dtype=torch.floataa )
__snake_case = DDIMScheduler.from_config(pipe.scheduler.config )
__snake_case = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''a bowl of fruit'''
__snake_case = '''a bowl of pears'''
__snake_case = pipe.generate_mask(
image=self.raw_image , source_prompt=a__ , target_prompt=a__ , generator=a__ , )
__snake_case = pipe.invert(
prompt=a__ , image=self.raw_image , inpaint_strength=0.7 , generator=a__ ).latents
__snake_case = pipe(
prompt=a__ , mask_image=a__ , image_latents=a__ , generator=a__ , negative_prompt=a__ , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
__snake_case = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def a (self : str ):
"""simple docstring"""
__snake_case = torch.manual_seed(0 )
__snake_case = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=a__ , torch_dtype=torch.floataa )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__snake_case = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''a bowl of fruit'''
__snake_case = '''a bowl of pears'''
__snake_case = pipe.generate_mask(
image=self.raw_image , source_prompt=a__ , target_prompt=a__ , generator=a__ , )
__snake_case = pipe.invert(
prompt=a__ , image=self.raw_image , inpaint_strength=0.7 , generator=a__ , num_inference_steps=25 , ).latents
__snake_case = pipe(
prompt=a__ , mask_image=a__ , image_latents=a__ , generator=a__ , negative_prompt=a__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
__snake_case = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 388 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_A, _A )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ ,__magic_name__ : int = emb.weight.shape
__magic_name__ : int = nn.Linear(_A, _A, bias=_A )
__magic_name__ : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCamelCase ( _A, _A=None ):
"""simple docstring"""
__magic_name__ : str = {}
for old_key in state_dict.keys():
__magic_name__ : str = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__magic_name__ : int = key.replace("""moe_layer.experts.0""", f'ffn.experts.expert_{expert_idx}' )
else:
__magic_name__ : Optional[int] = key.replace("""moe_layer.experts.""", """ffn.experts.expert_""" )
if "gate" in key:
__magic_name__ : List[Any] = key.replace(""".moe_layer.gate.wg""", """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__magic_name__ : Any = key.replace(""".fc2.""", """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__magic_name__ : Any = key.replace(""".fc1.""", """.ffn.fc1.""" )
if ".encoder_attn." in key:
__magic_name__ : Union[str, Any] = key.replace(""".encoder_attn.""", """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__magic_name__ : Any = key.replace("""encoder_attn_layer_norm""", """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__magic_name__ : str = key.replace("""final_layer_norm""", """ff_layer_norm""" )
__magic_name__ : List[Any] = state_dict[old_key]
return new_dict
def UpperCamelCase ( _A, _A, _A, _A, _A = WEIGHTS_NAME ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = []
__magic_name__ : str = 0
os.makedirs(_A, exist_ok=_A )
for expert in range(_A ):
__magic_name__ : str = switch_checkpoint_path + f'-rank-{expert}.pt'
if os.path.isfile(_A ):
__magic_name__ : str = torch.load(_A )["""model"""]
remove_ignore_keys_(_A )
__magic_name__ : str = rename_fairseq_keys(_A, _A )
__magic_name__ : Union[str, Any] = os.path.join(
_A, weights_name.replace(""".bin""", f'-{len(_A )+1:05d}-of-???.bin' ) )
torch.save(_A, _A )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_A )[0]].dtype )
# Add the last block
__magic_name__ : int = os.path.join(_A, weights_name.replace(""".bin""", f'-{len(_A )+1:05d}-of-???.bin' ) )
__magic_name__ : Optional[Any] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_A )
__magic_name__ : Optional[int] = rename_fairseq_keys(_A, _A )
__magic_name__ : Optional[int] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_A ) == 1:
__magic_name__ : str = os.path.join(_A, _A )
torch.save(_A, _A )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_A, _A )
# Otherwise, let's build the index
__magic_name__ : Tuple = {}
for idx, shard in enumerate(_A ):
__magic_name__ : Optional[Any] = weights_name.replace(""".bin""", f'-{idx+1:05d}-of-{len(_A ):05d}.bin' )
__magic_name__ : Any = os.path.join(_A, weights_name.replace(""".bin""", f'-{idx+1:05d}-of-???.bin' ) )
os.rename(_A, os.path.join(_A, _A ) )
for key in shard:
__magic_name__ : List[str] = shard_file
# Add the metadata
__magic_name__ : List[str] = {"""total_size""": total_size}
__magic_name__ : Optional[int] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_A, _A ), """w""", encoding="""utf-8""" ) as f:
__magic_name__ : str = json.dumps(_A, indent=2, sort_keys=_A ) + """\n"""
f.write(_A )
return metadata, index
if __name__ == "__main__":
__magic_name__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__magic_name__: List[str] = parser.parse_args()
__magic_name__ , __magic_name__: Optional[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__magic_name__: List[Any] = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__magic_name__: Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 324 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__magic_name__: Optional[int] = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__: Optional[Any] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__magic_name__: List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__magic_name__: Union[str, Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Tuple = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Dict = SqueezeBertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__magic_name__ : Any = getattr(lowerCAmelCase__ , normalizer_state.pop("""type""" ) )
__magic_name__ : Any = do_lower_case
__magic_name__ : List[str] = strip_accents
__magic_name__ : int = tokenize_chinese_chars
__magic_name__ : int = normalizer_class(**lowerCAmelCase__ )
__magic_name__ : Optional[int] = do_lower_case
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
__magic_name__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : int = [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
__magic_name__ : Optional[Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 324 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase : Tuple ={'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowerCamelCase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 237 | """simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase : Tuple =logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] ={
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __snake_case( A_ ):
'''simple docstring'''
_UpperCAmelCase = "umt5"
_UpperCAmelCase = ["past_key_values"]
def __init__( self , __lowerCamelCase=250112 , __lowerCamelCase=512 , __lowerCamelCase=64 , __lowerCamelCase=1024 , __lowerCamelCase=8 , __lowerCamelCase=None , __lowerCamelCase=6 , __lowerCamelCase=32 , __lowerCamelCase=128 , __lowerCamelCase=0.1 , __lowerCamelCase=1e-6 , __lowerCamelCase=1.0 , __lowerCamelCase="gated-gelu" , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase="T5Tokenizer" , __lowerCamelCase=True , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=0 , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=__lowerCamelCase , tokenizer_class=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , pad_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , **__lowerCamelCase , )
__A : Union[str, Any] = vocab_size
__A : Any = d_model
__A : str = d_kv
__A : List[Any] = d_ff
__A : Union[str, Any] = num_layers
__A : Tuple = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__A : Union[str, Any] = num_heads
__A : str = relative_attention_num_buckets
__A : Union[str, Any] = relative_attention_max_distance
__A : int = dropout_rate
__A : int = layer_norm_epsilon
__A : int = initializer_factor
__A : List[Any] = feed_forward_proj
__A : str = use_cache
__A : str = self.feed_forward_proj.split('-' )
__A : str = act_info[-1]
__A : Any = act_info[0] == 'gated'
if len(__lowerCamelCase ) > 1 and act_info[0] != "gated" or len(__lowerCamelCase ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__A : Optional[int] = 'gelu_new'
@property
def _a ( self ):
'''simple docstring'''
return self.d_model
@property
def _a ( self ):
'''simple docstring'''
return self.num_heads
@property
def _a ( self ):
'''simple docstring'''
return self.num_layers
class __snake_case( A_ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _a ( self ):
'''simple docstring'''
__A : List[Any] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__A : int = 'past_encoder_sequence + sequence'
__A : List[str] = {0: 'batch'}
__A : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__A : List[str] = {0: 'batch', 1: 'decoder_sequence'}
__A : str = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _a ( self ):
'''simple docstring'''
return 13
@property
def _a ( self ):
'''simple docstring'''
return 5e-4
| 237 | 1 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''-m''', '''--pretrained_model_name_or_path''', type=lowercase, default=lowercase, required=lowercase, help='''Path to pretrained model or model identifier from huggingface.co/models.''', )
parser.add_argument(
'''-c''', '''--caption''', type=lowercase, default='''robotic cat with wings''', help='''Text used to generate images.''', )
parser.add_argument(
'''-n''', '''--images_num''', type=lowercase, default=4, help='''How much images to generate.''', )
parser.add_argument(
'''-s''', '''--seed''', type=lowercase, default=42, help='''Seed for random process.''', )
parser.add_argument(
'''-ci''', '''--cuda_id''', type=lowercase, default=0, help='''cuda_id.''', )
_UpperCamelCase = parser.parse_args()
return args
def a__ ( lowercase : Tuple, lowercase : int, lowercase : int ) -> List[str]:
"""simple docstring"""
if not len(lowercase ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
_UpperCamelCase , _UpperCamelCase = imgs[0].size
_UpperCamelCase = Image.new('''RGB''', size=(cols * w, rows * h) )
_UpperCamelCase , _UpperCamelCase = grid.size
for i, img in enumerate(lowercase ):
grid.paste(lowercase, box=(i % cols * w, i // cols * h) )
return grid
def a__ ( lowercase : List[str], lowercase : int="robotic cat with wings", lowercase : Optional[int]=7.5, lowercase : List[str]=50, lowercase : List[str]=1, lowercase : str=42, ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = torch.Generator(pipeline.device ).manual_seed(lowercase )
_UpperCamelCase = pipeline(
lowercase, guidance_scale=lowercase, num_inference_steps=lowercase, generator=lowercase, num_images_per_prompt=lowercase, ).images
_UpperCamelCase = int(math.sqrt(lowercase ) )
_UpperCamelCase = image_grid(lowercase, rows=_rows, cols=num_images_per_prompt // _rows )
return grid, images
lowercase__ : List[Any] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase__ : Union[str, Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase__ : Tuple = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase__ : int = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase__ : int = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowercase__ : Optional[Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowercase__ : int = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowercase__ : str = unet.to(torch.device('cuda', args.cuda_id))
lowercase__ : str = pipeline.to(unet.device)
lowercase__ , lowercase__ : List[str] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase__ : Dict = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 98 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE = 768 , )->Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
A_ : Any = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )->Tuple:
'''simple docstring'''
A_ : Optional[Any] = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
A_ : str = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : List[str] = (embeds * self.std) + self.mean
return embeds
| 590 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Tuple= get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class __lowerCamelCase ( _a , unittest.TestCase ):
a : str =BartphoTokenizer
a : Union[str, Any] =False
a : str =True
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = ['โThis', 'โis', 'โa', 'โt', 'est']
UpperCamelCase__ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase__ = {'unk_token': '<unk>'}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
UpperCamelCase__ = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> int:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
UpperCamelCase__ = 'This is a lร test'
UpperCamelCase__ = 'This is a<unk><unk> test'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
UpperCamelCase__ = 'This is a lร test'
UpperCamelCase__ = 'โThis โis โa โl ร โt est'.split()
UpperCamelCase__ = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
import os
def UpperCAmelCase__ ( __snake_case ) -> Dict:
_A = len(grid[0] )
_A = len(snake_case_ )
_A = 0
_A = 0
_A = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(snake_case_ ):
for j in range(n_rows - 3 ):
_A = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
_A = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
_A = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
_A = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
_A = max(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if max_product > largest:
_A = max_product
return largest
def UpperCAmelCase__ ( ) -> str:
_A = []
with open(os.path.dirname(snake_case_ ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
_A = [[int(snake_case_ ) for i in grid[j]] for j in range(len(snake_case_ ) )]
return largest_product(snake_case_ )
if __name__ == "__main__":
print(solution()) | 317 |
"""simple docstring"""
import numpy as np
def A_ ( snake_case_ : Tuple ,snake_case_ : Any ,snake_case_ : str ,snake_case_ : Optional[int] ,snake_case_ : List[str] ):
'''simple docstring'''
UpperCamelCase : int = int(np.ceil((x_end - xa) / h ) )
UpperCamelCase : Dict = np.zeros((n + 1,) )
UpperCamelCase : Optional[int] = ya
UpperCamelCase : Optional[Any] = xa
for k in range(snake_case_ ):
UpperCamelCase : Optional[Any] = f(snake_case_ ,y[k] )
UpperCamelCase : Optional[Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCamelCase : Optional[Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCamelCase : Optional[int] = f(x + h ,y[k] + h * ka )
UpperCamelCase : Tuple = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 499 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class SCREAMING_SNAKE_CASE ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def __A ( self: Dict ) -> int:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=__A , )
def __A ( self: Optional[int] , __A: Any , __A: List[str] ) -> str:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def __A ( self: List[str] , __A: str , __A: List[str] ) -> Dict:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__A )
class SCREAMING_SNAKE_CASE ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def __A ( self: Optional[Any] ) -> Optional[int]:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=__A , )
def __A ( self: int , __A: str , __A: str ) -> Tuple:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def __A ( self: Optional[Any] , __A: Dict , __A: int ) -> Tuple:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__A )
def __A ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def __A ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
@require_beam
def __A ( self: Union[str, Any] ) -> str:
_A = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = DummyBeamDataset(cache_dir=__A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_A = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __A )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __A ( self: Tuple ) -> Dict:
import apache_beam as beam
_A = beam.io.parquetio.WriteToParquet
_A = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = DummyBeamDataset(cache_dir=__A , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
_A = partial(__A , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_A = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __A )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(__A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __A ( self: List[str] ) -> int:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = DummyBeamDataset(cache_dir=__A )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __A ( self: List[str] ) -> List[Any]:
_A = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = NestedBeamDataset(cache_dir=__A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
_A = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __A )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 62 |
import math
def __A ( _lowercase ):
'''simple docstring'''
_A = []
_A = 2
_A = int(math.sqrt(_lowercase ) ) # Size of every segment
_A = [True] * (end + 1)
_A = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowercase )
for i in range(start * start , end + 1 , _lowercase ):
_A = False
start += 1
prime += in_prime
_A = end + 1
_A = min(2 * end , _lowercase )
while low <= n:
_A = [True] * (high - low + 1)
for each in in_prime:
_A = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowercase , high + 1 , _lowercase ):
_A = False
for j in range(len(_lowercase ) ):
if temp[j] is True:
prime.append(j + low )
_A = high + 1
_A = min(high + end , _lowercase )
return prime
print(sieve(10**6))
| 62 | 1 |
from typing import Any
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return np.array_equal(lowerCAmelCase__ , matrix.conjugate().T )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : List[Any] = v.conjugate().T
SCREAMING_SNAKE_CASE_ : str = v_star.dot(lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , np.ndarray )
return (v_star_dot.dot(lowerCAmelCase__ )) / (v_star.dot(lowerCAmelCase__ ))
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
SCREAMING_SNAKE_CASE_ : int = np.array([[1], [2], [3]] )
assert is_hermitian(lowerCAmelCase__ ), f'{a} is not hermitian.'
print(rayleigh_quotient(lowerCAmelCase__ , lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowerCAmelCase__ ), f'{a} is not hermitian.'
assert rayleigh_quotient(lowerCAmelCase__ , lowerCAmelCase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 345 | def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
_lowerCAmelCase : Optional[int] = str(bin(lowerCAmelCase__ ) )[2:] # remove the leading "0b"
_lowerCAmelCase : Optional[Any] = str(bin(lowerCAmelCase__ ) )[2:] # remove the leading "0b"
_lowerCAmelCase : List[Any] = max(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowerCAmelCase__ ) , b_binary.zfill(lowerCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 424 | 0 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
    # Override test_step_shape to add scheduler-specific logic regarding timesteps
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
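# The hard-coded result_sum / result_mean values above serve as regression
# targets; they assume the deterministic dummy model and CPU execution that
# SchedulerCommonTest provides.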
| 713 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
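# Typical usage from a model test (BertConfig here is only an illustrative example):
#     self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#     self.config_tester.run_common_tests()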
| 624 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    >>> compare_string('0010','0110')
    '0_10'
    >>> compare_string('0110','1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """
    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """
    >>> decimal_to_binary(3,[1.5])
    ['0.00.01.5']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """
    >>> is_for_table('__1','011',2)
    True
    >>> is_for_table('01_','001',1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pick the essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily pick the row covering the most remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
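# Note on I/O: because the minterms are read as floats, str(minterm % 2) yields
# digits like "1.0", which is why decimal_to_binary(3, [1.5]) gives
# ['0.00.01.5'] in the doctest above.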
| 320 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT dataset with the ``datasets`` package and save it as
    {train,val,test}.source / {train,val,test}.target text files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
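# Example invocation through the fire CLI (assuming the file is saved as download_wmt.py):
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16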
| 565 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase__ ( _UpperCamelCase : int | str ) -> bool:
"""simple docstring"""
snake_case = str(_UpperCamelCase )
return n == n[::-1]
def lowerCAmelCase__ ( _UpperCamelCase : int = 1_0_0_0_0_0_0 ) -> Dict:
"""simple docstring"""
snake_case = 0
for i in range(1 , _UpperCamelCase ):
if is_palindrome(_UpperCamelCase ) and is_palindrome(bin(_UpperCamelCase ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
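# Small check by hand: below 10 the double-base palindromes are 1, 3, 5, 7, 9
# (binary 1, 11, 101, 111, 1001), so solution(10) == 25.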
| 704 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt (float based)."""
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
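# Examples: perfect_square_binary_search(16) -> True, perfect_square_binary_search(15) -> False.
# The binary-search variant sidesteps the float rounding that the sqrt-based
# check can hit for very large integers.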
| 104 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
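# With this lazy-module pattern, importing the package itself is cheap: the
# sentencepiece-backed tokenizer module is only loaded on first attribute access.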
| 99 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the remote-filesystem protocol (e.g. ``s3://``) from ``dataset_path``."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if ``fs`` points at a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
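# Example: extract_path_from_uri("s3://my-bucket/data/train") -> "my-bucket/data/train";
# paths without a protocol prefix are returned unchanged.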
| 99 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
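# The final allclose checks hold because DDPMScheduler.add_noise and
# DDIMScheduler.add_noise implement the same forward (noising) process for
# identical beta schedules, so both training runs see identical inputs.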
| 341 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagged(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
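# Note: the @slow tests above are skipped by default; in the transformers test
# suite they only run when the RUN_SLOW=1 environment variable is set.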
| 341 | 1 |