from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL image by adding `level` to every pixel."""

    def brightness(c: int) -> float:
        # Fundamental transformation applied to every pixel value.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100 and save the result
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
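# Aside (a sketch, not part of the original snippet): since
# 128 + level + (c - 128) reduces to c + level, the same brightness change
# can be one vectorized NumPy operation. `img.point` clamps to [0, 255] for
# 8-bit modes, so the explicit clip below mirrors that behaviour.
import numpy as np


def change_brightness_np(img: Image.Image, level: float) -> Image.Image:
    # Widen the dtype so the addition cannot wrap, then clamp back to uint8.
    arr = np.asarray(img).astype(np.int16) + int(level)
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))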
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32,
        do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
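# Worked example (hypothetical numbers, not part of the test suite): for a
# 640x480 (w x h) PIL image with shortest_edge=288 and size_divisor=32,
# scale = 288 / 480 = 0.6 -> (newh, neww) = (288, 384). The cap
# max_size = int(1333 / 800 * 288) = 479 is not exceeded, and both sides are
# already multiples of 32, so get_expected_values returns (288, 384).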
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key: str) -> str:
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
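# Illustrative traces of the plain string renames above (hypothetical keys,
# not part of the original script):
#   replace_key("foo.y_emb.weight")   -> "foo.metadata_embedding.weight"
#   replace_key("prior.x_out.weight") -> "prior.fc_proj_out.weight"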
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
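# Illustrative trace (hypothetical key) of the regex pipeline above:
# 'encoders.0.level_blocks.1.model.2.3.weight' fullmatches
# re_encoder_block_conv_in with groups ('0', '1', '2', '3', 'weight'),
# so block_index = 2 * 2 + 3 = 7 and the key becomes
# 'encoders.0.level_blocks.1.downsample_block.7.weight'.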
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
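# Hypothetical usage of the non-aggregated path above (mirrors the docstring
# example): with use_aggregator=False each rouge type maps to a list of
# per-pair Score(precision, recall, fmeasure) tuples instead of an
# AggregateScore:
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(predictions=["hello there"],
#                           references=["hello there"], use_aggregator=False)
#   results["rouge1"]  # [Score(precision=1.0, recall=1.0, fmeasure=1.0)]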
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
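# Illustrative trace of the renames above (hypothetical tensors w):
#   {"emb.weight": w, "blocks.2.att.time_mix_k": w}
#   -> {"rwkv.embeddings.weight": w, "rwkv.blocks.2.attention.time_mix_key": w}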
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
import numpy as np


class Cell:
    """A cell on the grid, holding its position, parent and A* bookkeeping."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the 8-connected neighbours of a cell."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal on the given grid world."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
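# Aside (not in the original): the heuristic above is the *squared* Euclidean
# distance, which keeps the demo integer-valued; for an 8-connected grid a
# common admissible alternative is the Chebyshev distance:
#   h = max(abs(x2 - x1), abs(y2 - y1))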
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if the given non-negative integer is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
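# Quick illustration of the 6k +/- 1 bound used above: every prime p > 3
# satisfies p % 6 in (1, 5), so trial division only needs the candidates
# i and i + 2 for i = 5, 11, 17, ... up to sqrt(n). For example:
#   [n for n in range(2, 50) if is_prime(n)]
#   -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]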
if __name__ == "__main__":
    unittest.main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
        position_embedding_type="absolute", block_per_row=4, approx_mode="full",
        initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
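# Minimal round-trip sketch (hypothetical path, no model weights involved);
# save_pretrained / from_pretrained are inherited from PretrainedConfig:
#   config = MraConfig(num_hidden_layers=2, hidden_size=128)
#   config.save_pretrained("/tmp/mra-tiny")            # writes config.json
#   reloaded = MraConfig.from_pretrained("/tmp/mra-tiny")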
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self):
        filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self):
        filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
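# A rough reimplementation inferred from the tests above (this is NOT the
# diffusers source): a file list is "safetensors compatible" when every
# PyTorch ".bin" weight file has a ".safetensors" sibling. Variant suffixes
# like ".fp16" are already encoded in the filenames, so they match through
# the same stem comparison.
def rough_is_safetensors_compatible(filenames, variant=None):
    for name in filenames:
        if not name.endswith(".bin"):
            continue
        stem = name[: -len(".bin")]
        # transformers checkpoints pair 'pytorch_model*.bin' with
        # 'model*.safetensors'; diffusers 'diffusion_pytorch_model' keeps its name.
        parts = stem.split("/")
        if parts[-1].startswith("pytorch_model"):
            parts[-1] = parts[-1].replace("pytorch_model", "model", 1)
            stem = "/".join(parts)
        if f"{stem}.safetensors" not in filenames:
            return False
    return True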
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
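# Illustration only (not from the conversion script): PyTorch's fused
# attention stores q, k and v stacked along dim 0, so with hidden size 256
# the in_proj_weight has shape (768, 256) and slices into three (256, 256)
# projection matrices:
#   q_w = in_proj_weight[:256, :]
#   k_w = in_proj_weight[256:512, :]
#   v_w = in_proj_weight[-256:, :]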
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
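# Vectorized alternative (a sketch, not in the original): an OpenCV image is
# just a NumPy uint8 array, so the whole inversion is one broadcast
# subtraction:
#   negative = 255 - img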
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 569 | 1 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral
    (Project Euler problem 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corners of ring i are odd**2, odd**2 - even, odd**2 - 2 * even
        # and odd**2 - 3 * even, which sum to 4 * odd**2 - 6 * even
        total = total + 4 * odd**2 - 6 * even
    return total
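

# Worked check (hand-verifiable): for a 5 x 5 spiral the diagonals hold
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, and solution(5) == 101.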
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 262 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
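

# Minimal usage sketch (illustrative, not part of this module): the checkpoint id
# "allenai/led-base-16384" is real, but the text and padding settings below are
# arbitrary example values.
def _example_global_attention_padding():
    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = tokenizer("long document ...")
    # give the first token global attention, then pad; `_pad` above extends
    # `global_attention_mask` with -1 so its length keeps matching `input_ids`
    enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
    return tokenizer.pad(enc, padding="max_length", max_length=32)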
| 37 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 129 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCAmelCase = "sshleifer/bart-tiny-random"
__lowerCAmelCase = "patrickvonplaten/t5-tiny-random"
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Tuple ):
return AutoConfig.from_pretrained(__UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=__UpperCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCAmelCase__ ( self : str ):
with self.assertRaises(__UpperCamelCase ):
create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=__UpperCamelCase , d=__UpperCamelCase )
| 129 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    """simple docstring"""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
| 332 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """simple docstring"""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
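

# Minimal usage sketch (illustrative values):
#
#       2.0
#      /   \
#    1.0   3.0
#
# is a valid BST, so the check below returns True.
def _example_bst_check() -> bool:
    root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    return is_binary_search_tree(root)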
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332 | 1 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
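

# Usage sketch via torch.hub (this is how a hubconf module like the one above is
# consumed; the entrypoint name "model" matches the wrapper defined above, while
# the checkpoint "bert-base-uncased" is an illustrative choice):
def _example_hub_load():
    import torch

    return torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")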
| 702 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
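
# Example invocation (paths are illustrative assumptions; the flags match the
# argparse options defined in main() above):
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16 \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking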
| 290 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
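

# Minimal usage sketch (the checkpoint id "microsoft/layoutlmv3-base" is real, but
# `image` is an assumed PIL.Image loaded elsewhere, and Tesseract OCR must be
# available since the default image processor has apply_ocr=True):
def _example_layoutlmv3_processing(image):
    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    # words and boxes come from the built-in OCR, so only the image is required
    return processor(image, return_tensors="pt")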
| 2 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        """simple docstring"""
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
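

# Round-trip sketch (shapes are illustrative): scale() standardizes an embedding
# and unscale() inverts it, so unscale(scale(x)) recovers x up to float error.
# Note that a freshly initialized normalizer has mean 0 and std 1, so both ops
# are identities until stats are loaded from a checkpoint.
def _example_normalizer_roundtrip():
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    return torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-5)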
| 627 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 292 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 292 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
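# The `_LazyModule` pattern below defers importing the torch-backed submodules
# until an attribute is first accessed, keeping `import transformers` cheap.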
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 447 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank, training_args.device, training_args.n_gpu,
        bool(training_args.local_rank != -1), training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir, use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir,
            tokenizer=tokenizer, labels=labels, model_type=config.model_type,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir,
            tokenizer=tokenizer, labels=labels, model_type=config.model_type,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                # Skip padding/special positions, which carry the ignore_index label.
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
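    # Note: with seqeval, precision/recall/F1 are entity-level scores (a prediction
    # counts only if both the entity span and its type match), while accuracy_score
    # is computed per token.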
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset,
        eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir,
            tokenizer=tokenizer, labels=labels, model_type=config.model_type,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 447 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
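# `RandomIterableDataset` stops at a random point (probability `p_stop` after each
# yield), producing streams of unpredictable, uneven length — exactly the case
# `IterableDatasetShard` has to handle gracefully.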
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
SCREAMING_SNAKE_CASE : List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
    def test_batch_sampler_shards_with_splits(self):
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : str = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
    def test_batch_sampler_shards_with_splits_no_even(self):
SCREAMING_SNAKE_CASE : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
    def test_batch_sampler_shards_with_varying_batch_size(self):
        sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        # Re-interleaving the shards batch-by-batch must reproduce the reference stream;
        # when drop_last=False the reference is tiled to cover the padding the shards add.
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 719 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process under shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
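# Worked example: burst times [2, 5, 3, 7], all arriving at t=0, run in SJF order
# P1 -> P3 -> P2 -> P4, giving waiting times [0, 5, 2, 10] and turnaround times
# [2, 10, 8, 17] (turnaround = burst + waiting).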
if __name__ == "__main__":
print("""[TEST CASE 01]""")
_lowerCAmelCase :Optional[int] = 4
_lowerCAmelCase :Optional[int] = [2, 5, 3, 7]
_lowerCAmelCase :List[str] = [0, 0, 0, 0]
_lowerCAmelCase :Union[str, Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_lowerCAmelCase :Any = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 179 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
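    # `feature_size` above is the model's raw input width per time step: the lagged
    # target values (input_size * len(lags_sequence)) plus the per-step covariates
    # counted by `_number_of_features` below.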
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 234 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 234 | 1 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
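# Usage sketch (caller names assumed, not part of this module):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   features = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")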
| 703 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that still fits."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse denominations, largest first
    for denomination in reversed(denominations):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the chosen denomination to the answer
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 457 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_snake_case = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 500 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
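                # Note: under `accelerator.accumulate(model)`, gradient synchronization
                # and the optimizer/scheduler steps only take effect every
                # `gradient_accumulation_steps` batches; in between, the step calls are
                # effectively no-ops, so this inner block is safe to run on every batch.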
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 500 | 1 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
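# Usage sketch: wrap any interactive rendering in `hide()` so the cursor is
# restored even if the body raises:
#     with hide():
#         render_menu()  # hypothetical caller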
| 711 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def couloumbs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
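    # Added example: solve for the force between two 1 C charges placed 1 m apart.
    print(couloumbs_law(0, 1, 1, 1))  # {'force': 8988000000.0}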
| 250 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
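
# Note: this class is gated by the `@nightly` decorator, so in the diffusers
# test suite it is typically only collected when nightly tests are enabled
# (e.g. via a RUN_NIGHTLY-style environment switch) rather than on every run.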
| 395 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
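
# Minimal usage sketch (names as defined above): stop generation after 20
# tokens or 2 seconds, whichever comes first.
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)])
#     should_stop = criteria(input_ids, scores)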
| 395 | 1 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # max_weight = -15
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # weight = [2, -4, 6, -8, 10, 12]
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # profit = [10, -20, 30, -40, 50, 60]
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # max_weight = 0
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # len(profit) != len(weight)
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 217 | """simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 217 | 1 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
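    # Added sanity check: sin(pi / 2) should be very close to 1.
    print(abs(maclaurin_sin(pi / 2) - 1) < 1e-10)  # True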
| 439 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
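
# Minimal usage sketch (prompt text is illustrative):
#
#     mixed_precision = _ask_options(
#         "Do you wish to use mixed precision?",
#         ["no", "fp16", "bf16", "fp8"],
#         _convert_mixed_precision,
#     )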
| 439 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
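
# Minimal usage sketch:
#
#     @dataclasses.dataclass
#     class ExampleArguments:
#         learning_rate: float = 1e-4
#         do_train: bool = False
#
#     parser = HfArgumentParser(ExampleArguments)
#     (example_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "3e-5", "--do_train"])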
| 53 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 53 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
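
# Minimal usage sketch: instantiate a default configuration and inspect it.
#
#     configuration = LevitConfig()
#     print(configuration.hidden_sizes)  # [128, 256, 384]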
| 496 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 104 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
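
# Example invocation (script filename and paths are illustrative):
#   python change_naming_configs_and_checkpoints.py --repo_path ./my-unet-repo --dump_path ./converted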
| 707 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 688 | 0 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    # Counts the distinct ways to climb a staircase taking 1 or 2 steps at a time.
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
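
# Added examples: climb_stairs(1) == 1, climb_stairs(2) == 2, climb_stairs(5) == 8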
if __name__ == "__main__":
import doctest
doctest.testmod() | 578 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}") | 578 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
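
# Minimal usage sketch: a default configuration uses block-sparse attention.
#
#     configuration = BigBirdConfig()
#     print(configuration.attention_type)  # "block_sparse"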
| 712 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
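# Sanity sketch (hedged) of the qkv split above: a fused projection of shape
# (3 * hidden_size, hidden_size) is sliced into equal query/key/value thirds.
#
#   import torch
#   dim = 4
#   qkv = torch.randn(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)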
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
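# Example invocation (sketch; the script filename and output path are placeholders):
#   python convert_audio_spectrogram_transformer.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub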
| 565 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
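# Standalone usage sketch (hedged), mirroring the slow test above:
#   import torch
#   from diffusers import ScoreSdeVePipeline
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=10, generator=torch.manual_seed(0)).images[0]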
| 102 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowercase_ :
'''simple docstring'''
@staticmethod
def snake_case_ ( *a_ , **a_ ) -> Tuple:
"""simple docstring"""
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(image: Image) -> Dict:
    npimg = np.array(image)
    shape = npimg.shape
    return {"hash": hashimage(image), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : Tuple = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowerCAmelCase : Tuple = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
@require_torch
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0053},
] , )
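# Usage sketch (hedged) of the pipeline exercised above:
#   from transformers import pipeline
#   mask_generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
#   outputs = mask_generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
#   # outputs["masks"] holds binary masks; outputs["scores"] their predicted IoU scores.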
| 447 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it for training and evaluation.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
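# Possible extension (hedged sketch): score the classifier on the held-out split.
# `accuracy_score` is standard scikit-learn; it is not used in the file above.
#
#   from sklearn.metrics import accuracy_score
#   predictions = xgboost_classifier.predict(x_test)
#   print(f"Accuracy: {accuracy_score(y_test, predictions):.3f}")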
| 21 |
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
    bounds = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
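# Worked example: for n = 221, n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2.
# For the witness prime 2: pow(2, 55, 221) = 128 and pow(2, 110, 221) = 30;
# neither pass (r == 0 and m == 1, or (m + 1) % n == 0) triggers, so 221
# (= 13 * 17) is correctly reported composite.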
def test_miller_rabin() -> None:
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("Does not support attention outputs" )
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
pass
@unittest.skip
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
pass
@unittest.skip("Esm does not support embedding resizing" )
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Esm does not support embedding resizing" )
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def A_ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def A_ ( self : Any ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def A_ ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def A_ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def A_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def A_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def A_ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold only has one output format." )
def A_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("This test doesn\'t work for ESMFold and doesn\'t test core functionality" )
def A_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support input chunking." )
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments." )
def A_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def A_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def A_ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn\'t support data parallel." )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
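# Usage sketch (hedged), matching the integration test above; the tokenizer call
# follows the documented ESMFold pattern:
#   import torch
#   from transformers import AutoTokenizer, EsmForProteinFolding
#   tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
#   model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
#   inputs = tokenizer(["MKTAYIAKQR"], return_tensors="pt", add_special_tokens=False)
#   with torch.no_grad():
#       positions = model(**inputs)["positions"]  # shape (8, batch, seq_len, 14, 3)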
| 663 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_A = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
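# Usage sketch (hedged), with the field names restored above:
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",
#       sortish_sampler=True,
#       predict_with_generate=True,
#       label_smoothing=0.1,
#   )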
| 158 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
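# Example (sketch): sigmoid maps any real input into (0, 1), elementwise.
#   >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
#   array([0.26894142, 0.5       , 0.73105858])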
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
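# Example (sketch): for the text "hello world", analyze_text counts single characters
# ({"l": 3, "o": 2, ...}) and adjacent two-character strings ({" h": 1, "he": 1,
# "el": 1, "ll": 1, ...}); calculate_prob then prints the first-order entropy, the
# second-order entropy, and their difference, each rounded to one decimal place.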
| 34 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_UpperCamelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class TFMobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            embedding_size=self.embedding_size,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : int , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any]) -> Union[str, Any]:
A_ = TFMobileBertModel(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
A_ = [input_ids, input_mask]
A_ = model(_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def __snake_case ( self : Optional[Any] , _lowercase : Any , _lowercase : Any , _lowercase : Any , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[int]) -> Optional[Any]:
A_ = TFMobileBertForMaskedLM(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __snake_case ( self : int , _lowercase : str , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : List[Any]) -> List[Any]:
A_ = TFMobileBertForNextSentencePrediction(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def __snake_case ( self : Tuple , _lowercase : Optional[int] , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : Any , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Union[str, Any]) -> Dict:
A_ = TFMobileBertForPreTraining(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def __snake_case ( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : str , _lowercase : Union[str, Any]) -> Tuple:
A_ = self.num_labels
A_ = TFMobileBertForSequenceClassification(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : List[str] , _lowercase : Tuple , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : str) -> Dict:
A_ = self.num_choices
A_ = TFMobileBertForMultipleChoice(config=_lowercase)
A_ = tf.tile(tf.expand_dims(_lowercase , 1) , (1, self.num_choices, 1))
A_ = tf.tile(tf.expand_dims(_lowercase , 1) , (1, self.num_choices, 1))
A_ = tf.tile(tf.expand_dims(_lowercase , 1) , (1, self.num_choices, 1))
A_ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __snake_case ( self : Tuple , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : List[Any] , _lowercase : int , _lowercase : int , _lowercase : List[Any] , _lowercase : int) -> Tuple:
A_ = self.num_labels
A_ = TFMobileBertForTokenClassification(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __snake_case ( self : Optional[Any] , _lowercase : int , _lowercase : str , _lowercase : Tuple , _lowercase : str , _lowercase : Dict , _lowercase : Dict , _lowercase : Optional[Any]) -> Union[str, Any]:
A_ = TFMobileBertForQuestionAnswering(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def __snake_case ( self : Tuple) -> Optional[int]:
self.config_tester.run_common_tests()
def __snake_case ( self : Any) -> Optional[int]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowercase)
def __snake_case ( self : str) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowercase)
def __snake_case ( self : Union[str, Any]) -> Any:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowercase)
def __snake_case ( self : Dict) -> Dict:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowercase)
def __snake_case ( self : Optional[int]) -> Tuple:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowercase)
def __snake_case ( self : List[Any]) -> int:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowercase)
def __snake_case ( self : List[str]) -> int:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowercase)
def __snake_case ( self : Optional[int]) -> Dict:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowercase)
@slow
def __snake_case ( self : int) -> List[Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
A_ = TFMobileBertModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
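# Usage sketch (hedged) for the checkpoint exercised above:
#   import tensorflow as tf
#   from transformers import AutoTokenizer, TFMobileBertForMaskedLM
#   tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
#   model = TFMobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")
#   inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
#   logits = model(**inputs).logits
#   mask_pos = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
#   print(tokenizer.decode([int(tf.argmax(logits[0, mask_pos]))]))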
| 366 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
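# Usage sketch (hedged): `Trainer` enables this callback automatically inside
# notebooks; it can also be attached explicitly.
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
#   trainer.add_callback(NotebookProgressCallback())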
| 366 | 1 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Simple classification head mapping a hidden state to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
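# Usage sketch (hedged): the head maps a hidden state to class logits.
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   hidden = torch.randn(2, 768)   # (batch, embed_size)
#   logits = head(hidden)          # torch.Size([2, 5])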
| 217 | """simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 217 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'The hidden size is not divisble by the number of attention heads! Make sure to update them!' )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 91 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
for attribute in key.split('.' ):
a__ = getattr(UpperCAmelCase__,UpperCAmelCase__ )
if weight_type is not None:
a__ = getattr(UpperCAmelCase__,UpperCAmelCase__ ).shape
else:
a__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a__ = value
elif weight_type == "weight_g":
a__ = value
elif weight_type == "weight_v":
a__ = value
elif weight_type == "bias":
a__ = value
else:
a__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
a__ = []
a__ = fairseq_model.state_dict()
a__ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
a__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,hf_model.config.feat_extract_norm == 'group',)
a__ = True
else:
for key, mapped_key in MAPPING.items():
a__ = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
a__ = True
if "*" in mapped_key:
a__ = name.split(UpperCAmelCase__ )[0].split('.' )[-2]
a__ = mapped_key.replace('*',UpperCAmelCase__ )
if "weight_g" in name:
a__ = 'weight_g'
elif "weight_v" in name:
a__ = 'weight_v'
elif "weight" in name:
a__ = 'weight'
elif "bias" in name:
a__ = 'bias'
else:
a__ = None
set_recursively(UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> str:
'''simple docstring'''
a__ = full_name.split('conv_layers.' )[-1]
a__ = name.split('.' )
a__ = int(items[0] )
a__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
@torch.no_grad()
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__=None,UpperCAmelCase__=None,UpperCAmelCase__=True ) -> int:
'''simple docstring'''
if config_path is not None:
a__ = HubertConfig.from_pretrained(UpperCAmelCase__ )
else:
a__ = HubertConfig()
if is_finetuned:
if dict_path:
a__ = Dictionary.load(UpperCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ = target_dict.pad_index
a__ = target_dict.bos_index
a__ = target_dict.eos_index
a__ = len(target_dict.symbols )
a__ = os.path.join(UpperCAmelCase__,'vocab.json' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase__ ) )
return
os.makedirs(UpperCAmelCase__,exist_ok=UpperCAmelCase__ )
with open(UpperCAmelCase__,'w',encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices,UpperCAmelCase__ )
a__ = WavaVecaCTCTokenizer(
UpperCAmelCase__,unk_token=target_dict.unk_word,pad_token=target_dict.pad_word,bos_token=target_dict.bos_word,eos_token=target_dict.eos_word,word_delimiter_token='|',do_lower_case=UpperCAmelCase__,)
a__ = True if config.feat_extract_norm == 'layer' else False
a__ = WavaVecaFeatureExtractor(
feature_size=1,sampling_rate=1_60_00,padding_value=0,do_normalize=UpperCAmelCase__,return_attention_mask=UpperCAmelCase__,)
a__ = WavaVecaProcessor(feature_extractor=UpperCAmelCase__,tokenizer=UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
a__ = HubertForCTC(UpperCAmelCase__ )
else:
a__ = HubertModel(UpperCAmelCase__ )
if is_finetuned:
a__ , a__ , a__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path],arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ , a__ , a__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
a__ = model[0].eval()
recursively_load_weights(UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__magic_name__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
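# Hedged command-line sketch for the conversion script above (script name and paths are
# placeholders, not confirmed by this file):
# python convert_hubert_checkpoint.py \
#     --checkpoint_path ./hubert_base_ls960.pt \
#     --pytorch_dump_folder_path ./hubert-base-converted \
#     --not_finetuned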
| 232 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
_lowerCAmelCase = "realm"
    def __init__(self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
# Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
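# Hedged instantiation sketch for the REALM config class above (class name as defined in
# this snippet; argument values are illustrative):
# config = SCREAMING_SNAKE_CASE__(num_candidates=2, reader_beam_size=4)
# assert config.model_type == "realm" and config.reader_beam_size == 4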
| 63 |
"""simple docstring"""
import math
import sys
import cv2 as cva  # OpenCV; aliased so the names used below keep working
import numpy as np
def vec_gaussian( img: np.ndarray , variance: float ) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img: np.ndarray , x: int , y: int , kernel_size: int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size: int , spatial_variance: float ) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter( img: np.ndarray , spatial_variance: float , intensity_variance: float , kernel_size: int , ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args( args: list ) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
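# Hedged programmatic usage of bilateral_filter without the OpenCV GUI, assuming a
# float image scaled to [0, 1]:
# rng_img = np.clip(np.random.rand(64, 64).astype("float32"), 0, 1)
# smoothed = bilateral_filter(rng_img, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)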
| 63 | 1 |
def solution(n: int = 1_000_000):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2, n):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
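# Sanity check on a small bound: below 10 the longest Collatz chain starts at 9,
# which takes 20 terms to reach 1.
# print(solution(10))  # -> 9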
| 99 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __UpperCAmelCase ( PipelineTool ):
"""simple docstring"""
    description = (
        """This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. """
        """It takes two arguments named `image` which should be the original image, and `label` which should be a text """
        """describing the elements that should be identified in the segmentation mask. The tool returns the mask."""
    )
    default_checkpoint = """CIDAS/clipseg-rd64-refined"""
    name = """image_segmenter"""
    model_class = CLIPSegForImageSegmentation

    inputs = ["""image""", """text"""]
    outputs = ["""image"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="""pt""" )
    def forward( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
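# Hedged usage sketch for the segmentation tool above (class name as defined in this
# snippet; needs the vision extras and downloads the CIDAS/clipseg-rd64-refined weights):
# tool = __UpperCAmelCase()
# mask = tool(image=Image.open("cat.png"), label="cat")  # returns a PIL mask image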
| 99 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs( graph: dict ) -> bool:
    """simple docstring"""
    visited = [False] * len(graph )
    color = [-1] * len(graph )

    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )

    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )

    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
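# A second, hedged check: appending edge (0, 2) creates the odd cycle 0-1-2, so the
# same routine reports the graph as non-bipartite.
# graph[0].append(2); graph[2].append(0)
# print(check_bipartite_dfs(graph))  # -> False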
| 389 | '''simple docstring'''
def _lowerCamelCase ( sentence: str , ngram_size: int ):
    """simple docstring"""
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
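# Hedged usage sketch of the n-gram extractor above:
# _lowerCamelCase("abcde", 3)  # -> ['abc', 'bcd', 'cde']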
| 389 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( voltage: float , current: float , resistance: float ) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
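# Hedged usage sketch of the Ohm's-law solver above (pass exactly one zero):
# SCREAMING_SNAKE_CASE__(voltage=10, current=0, resistance=5)  # -> {'current': 2.0}
# SCREAMING_SNAKE_CASE__(voltage=0, current=2, resistance=3)   # -> {'voltage': 6.0}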
| 220 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """simple docstring"""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(contents):
        return tok(contents, return_tensors='pt').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """simple docstring"""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"""{split}.source""").open('w').write('\n'.join(packed_src))
        Path(save_path / f"""{split}.target""").open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path, save_path / f"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / f"""{split}.target""")
def packer_cli():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
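# Hedged command-line sketch for the packer above (paths and model name are placeholders):
# python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#     --data_dir ./cnn_dm --save_path ./cnn_dm_packed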
| 220 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaV2Config as DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM as DebertaVaForMaskedLM,
        DebertaV2ForMultipleChoice as DebertaVaForMultipleChoice,
        DebertaV2ForQuestionAnswering as DebertaVaForQuestionAnswering,
        DebertaV2ForSequenceClassification as DebertaVaForSequenceClassification,
        DebertaV2ForTokenClassification as DebertaVaForTokenClassification,
        DebertaV2Model as DebertaVaModel,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=False , __lowercase=True , __lowercase="None" , __lowercase=3 , __lowercase=4 , __lowercase=None , ):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = relative_attention
UpperCAmelCase__ = position_biased_input
UpperCAmelCase__ = pos_att_type
UpperCAmelCase__ = scope
def A__ ( self ):
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def A__ ( self , __lowercase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def A__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase__ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase__ = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase__ = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def A__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase__ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase__ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def A__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase__ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase__ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self ):
UpperCAmelCase__ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = config_and_inputs
UpperCAmelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowercase : Union[str, Any] = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : str = True
__lowercase : List[Any] = False
__lowercase : Dict = False
__lowercase : str = False
__lowercase : Tuple = False
def A__ ( self ):
UpperCAmelCase__ = DebertaVaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def A__ ( self ):
self.config_tester.run_common_tests()
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def A__ ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def A__ ( self ):
pass
@slow
def A__ ( self ):
UpperCAmelCase__ = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
UpperCAmelCase__ = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
UpperCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 712 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
UpperCAmelCase__ = old_name
if "patch_embed" in old_name:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = old_name.split(""".""" )
if layer == "0":
UpperCAmelCase__ = old_name.replace("""0""" , """convolution1""" )
elif layer == "1":
UpperCAmelCase__ = old_name.replace("""1""" , """batchnorm_before""" )
elif layer == "3":
UpperCAmelCase__ = old_name.replace("""3""" , """convolution2""" )
else:
UpperCAmelCase__ = old_name.replace("""4""" , """batchnorm_after""" )
if "network" in old_name and re.search(r"""\d\.\d""" , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = r"""\b\d{2}\b"""
if bool(re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase__ = re.search(r"""\d\.\d\d.""" , _SCREAMING_SNAKE_CASE ).group()
else:
UpperCAmelCase__ = re.search(r"""\d\.\d.""" , _SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
UpperCAmelCase__ = old_name.replace(_SCREAMING_SNAKE_CASE , """""" )
UpperCAmelCase__ = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
UpperCAmelCase__ = """intermediate_stages.""" + trimmed_name
else:
UpperCAmelCase__ = old_name.replace(_SCREAMING_SNAKE_CASE , """""" )
if int(match[2] ) < num_meta4D_last_stage:
UpperCAmelCase__ = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
else:
UpperCAmelCase__ = str(int(match[2] ) - num_meta4D_last_stage )
UpperCAmelCase__ = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""norm1""" , """layernorm1""" )
elif "norm2" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""norm2""" , """layernorm2""" )
elif "fc1" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""fc1""" , """linear_in""" )
elif "fc2" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""fc2""" , """linear_out""" )
UpperCAmelCase__ = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(r""".\d.""" , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = old_name.replace("""network""" , """intermediate_stages""" )
if "fc" in new_name:
UpperCAmelCase__ = new_name.replace("""fc""" , """convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
UpperCAmelCase__ = new_name.replace("""norm1""" , """batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
UpperCAmelCase__ = new_name.replace("""norm2""" , """batchnorm_after""" )
if "proj" in new_name:
UpperCAmelCase__ = new_name.replace("""proj""" , """projection""" )
if "dist_head" in new_name:
UpperCAmelCase__ = new_name.replace("""dist_head""" , """distillation_classifier""" )
elif "head" in new_name:
UpperCAmelCase__ = new_name.replace("""head""" , """classifier""" )
elif "patch_embed" in new_name:
UpperCAmelCase__ = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
UpperCAmelCase__ = new_name.replace("""norm""" , """layernorm""" )
UpperCAmelCase__ = """efficientformer.""" + new_name
else:
UpperCAmelCase__ = """efficientformer.encoder.""" + new_name
return new_name
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
for key in checkpoint.copy().keys():
UpperCAmelCase__ = checkpoint.pop(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = val
return checkpoint
def snake_case__ ( ) ->Optional[Any]:
UpperCAmelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
UpperCAmelCase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""]
UpperCAmelCase__ = EfficientFormerConfig.from_json_file(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = EfficientFormerForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
    UpperCAmelCase__ = config.depths[-1] - config.num_meta3d_blocks + 1
UpperCAmelCase__ = convert_torch_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__ = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = 2_5_6
UpperCAmelCase__ = 2_2_4
UpperCAmelCase__ = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
UpperCAmelCase__ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
# original processing pipeline
UpperCAmelCase__ = Compose(
[
Resize(_SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings["""bicubic"""] ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
] )
UpperCAmelCase__ = image_transforms(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = outputs.logits
UpperCAmelCase__ = (1, 1_0_0_0)
if "l1" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :1_0] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :1_0] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
            F'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
    print(F'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add model""" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add image processor""" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
a : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
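# Hedged command-line sketch for the conversion script above (script name and paths are
# placeholders, not confirmed by this file):
# python convert_efficientformer_checkpoint.py \
#     --pytorch_model_path ./efficientformer_l1.pth \
#     --config_file ./efficientformer_l1_config.json \
#     --pytorch_dump_path efficientformer-l1 --no-push_to_hub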
| 422 | 0 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    '''simple docstring'''

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if all persons have received a task, one valid arrangement is complete
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we do not assign this task to anyone
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
_A = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_A = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
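# The demo above counts complete assignments of 5 tasks to 3 persons whose allowed
# tasks are [1, 3, 4], [1, 2, 5] and [3, 4]; enumerating the matchings by hand gives
# 10, so the script is expected to print 10.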
| 182 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 182 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
snake_case_ : Optional[Any] =logging.get_logger(__name__)
class a__ ( ModelMixin ):
def __init__( self , lowercase__ ) -> str:
super().__init__()
__A = nn.ModuleList(lowercase__ )
    def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )

            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )

            idx += 1
            model_path_to_save = model_path_to_save + F"""_{idx}"""
@classmethod
    def from_pretrained( cls , pretrained_model_path , **kwargs ):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )

            idx += 1
            model_path_to_load = pretrained_model_path + F"""_{idx}"""

        logger.info(F"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )

        if len(controlnets ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )

        return cls(controlnets )
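# Hedged usage sketch for the multi-ControlNet wrapper above (class name as defined in
# this snippet; checkpoint ids are placeholders):
# from diffusers import ControlNetModel
# nets = [ControlNetModel.from_pretrained(p) for p in ("lllyasviel/sd-controlnet-canny", "lllyasviel/sd-controlnet-depth")]
# multi = a__(nets)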
| 205 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a__ :
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=10 , lowercase__=3 , lowercase__=2 , lowercase__=2 , lowercase__=True , lowercase__=True , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=10 , lowercase__=0.02 , lowercase__="divided_space_time" , lowercase__=None , ) -> Any:
__A = parent
__A = batch_size
__A = image_size
__A = num_channels
__A = patch_size
__A = num_frames
__A = is_training
__A = use_labels
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = attention_type
__A = initializer_range
__A = scope
__A = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__A = (image_size // patch_size) ** 2
__A = (num_frames) * self.num_patches_per_frame + 1
def _lowerCamelCase ( self ) -> Any:
__A = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.num_labels )
__A = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ) -> List[Any]:
__A = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__A = self.num_labels
return config
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
__A = TimesformerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__A = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
__A = TimesformerForVideoClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__A = model(lowercase__ )
# verify the logits shape
__A = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
UpperCAmelCase_ : Optional[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCAmelCase_ : Optional[Any] = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Dict = False
def _lowerCamelCase ( self ) -> int:
__A = TimesformerModelTester(self )
__A = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 )
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=False ) -> int:
__A = copy.deepcopy(lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
return inputs_dict
def _lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _lowerCamelCase ( self ) -> Optional[Any]:
pass
def _lowerCamelCase ( self ) -> int:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def _lowerCamelCase ( self ) -> int:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(lowercase__ )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase__ )
def _lowerCamelCase ( self ) -> List[Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def _lowerCamelCase ( self ) -> Dict:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase__ )
@slow
def _lowerCamelCase ( self ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TimesformerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _lowerCamelCase ( self ) -> str:
if not self.has_attentions:
pass
else:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = True
for model_class in self.all_model_classes:
__A = self.model_tester.seq_length
__A = self.model_tester.num_frames
__A = True
__A = False
__A = True
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__A = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A = True
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__A = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__A = len(lowercase__ )
# Check attention is always last and order is fine
__A = True
__A = True
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + 1 , len(lowercase__ ) )
__A = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _lowerCamelCase ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__A = outputs.hidden_states
__A = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase__ ) , lowercase__ )
__A = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase ( ):
'''simple docstring'''
__A = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__A = np.load(lowerCAmelCase__ )
return list(lowerCAmelCase__ )
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ) -> int:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ) -> Optional[int]:
__A = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
lowercase__ )
__A = self.default_image_processor
__A = prepare_video()
__A = image_processor(video[:8] , return_tensors="pt" ).to(lowercase__ )
# forward pass
with torch.no_grad():
__A = model(**lowercase__ )
# verify the logits
__A = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__A = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) )
| 205 | 1 |
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""") | 360 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__lowerCamelCase = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
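# Example environment that would make the check above return True (assuming the
# `smdistributed` package is importable; the "microbatches" key is illustrative only):
#
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'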
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 467 | 0 |
"""Convert Conditional DETR checkpoints."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
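# Illustration of the mapping above (hypothetical key, not taken from a real checkpoint):
#   "backbone.0.body.layer1.0.conv1.weight" -> "backbone.conv_encoder.model.layer1.0.conv1.weight"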
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
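# The slicing above assumes a 768-row in_proj matrix that stacks the query, key and
# value projections for a hidden size of 256: rows 0-255 are the query projection,
# rows 256-511 the key projection, and the last 256 rows the value projection.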
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our Conditional DETR structure.
    """

    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # replace the leading "conditional_detr" with "conditional_detr.model"
                # (mirrors the analogous DETR conversion script)
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
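# Example invocation (the script filename and output path are hypothetical):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50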
| 719 |
"""Tests for the OneFormer image processor."""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
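    # binary_mask_to_rle encodes the flattened mask as (start, length) pairs: the
    # first foreground run starts at 1-indexed position 21 and spans 45 pixels,
    # because rows 0 and 1 of the mask form one contiguous run in flattened order.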
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 324 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
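# Minimal usage sketch: a 1000-step cosine noise schedule, with each beta clipped at
# max_beta=0.999. The cumulative product recovers the alpha_bar curve the schedule
# was derived from.
#
#   betas = betas_for_alpha_bar(1000)                      # torch.Size([1000])
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)     # monotonically decreasing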
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP does not rescale model inputs; this exists for scheduler API compatibility.
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
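    # For "learned_range", `frac` maps the model's predicted variance channel from
    # [-1, 1] to [0, 1] and interpolates in log space between the two analytic
    # bounds (the posterior variance and beta), following the Improved DDPM recipe.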
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples | 327 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # divide out this prime factor completely
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)
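# Examples (the default input's answer, 6857, is the well-known Project Euler #3
# result; verify locally before relying on it):
#
#   >>> solution(13195)
#   29
#   >>> solution(17)
#   17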
if __name__ == "__main__":
    print(f"""{solution() = }""") | 327 | 1 |
"""Fast tests for the DeepFloyd IF inpainting super-resolution pipeline."""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 551 |
"""Implementation of the `transformers-cli env` diagnostic command."""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
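    # `run` backs the `transformers-cli env` subcommand registered above; the
    # printed block is meant to be copy-pasted into GitHub bug reports.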
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 551 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def _lowercase ( self : Dict ) -> Dict:
lowercase_ = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
lowercase_ = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = torch.Size((1, 3) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 97 |
from __future__ import annotations
def binary_search(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Return the index of the smallest element in v[l+1:r+1] that is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """O(n log n) length of the longest strictly increasing subsequence of v."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it becomes the tail of a length-1 subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element, keeping tails as small as possible
            tail[binary_search(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
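    # Illustrative sanity check (added): the longest strictly increasing
    # subsequence of [8, 3, 4, 6, 5, 7] is e.g. [3, 4, 5, 7], so the length is 4.
    assert longest_increasing_subsequence_length([8, 3, 4, 6, 5, 7]) == 4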
| 97 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
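    # Every component above is deliberately tiny (32/64-channel blocks, a
    # 5-layer text encoder, an 8-dim vocoder input) so the full
    # text -> latent -> waveform path runs in seconds on CPU.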
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
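    # The tests below are smoke tests: each runs the tiny pipeline for a couple
    # of scheduler steps and compares a short waveform slice against frozen
    # reference values.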
def A_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict = AudioLDMPipeline(**_A )
SCREAMING_SNAKE_CASE__ : List[str] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE__ : List[str] = audioldm_pipe(**_A )
SCREAMING_SNAKE_CASE__ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 2_5_6
SCREAMING_SNAKE_CASE__ : List[str] = audio[:1_0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def A_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AudioLDMPipeline(**_A )
SCREAMING_SNAKE_CASE__ : Tuple = audioldm_pipe.to(_A )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE__ : Tuple = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe(**_A )
SCREAMING_SNAKE_CASE__ : List[Any] = output.audios[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE__ : int = 3 * [inputs.pop("prompt" )]
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe.tokenizer(
_A, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=_A, return_tensors="pt", )
SCREAMING_SNAKE_CASE__ : Tuple = text_inputs["input_ids"].to(_A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe.text_encoder(
_A, )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE__ : List[Any] = F.normalize(_A, dim=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = prompt_embeds
# forward
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe(**_A )
SCREAMING_SNAKE_CASE__ : List[str] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = AudioLDMPipeline(**_A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe.to(_A )
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE__ : int = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE__ : Any = negative_prompt
SCREAMING_SNAKE_CASE__ : List[str] = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe(**_A )
SCREAMING_SNAKE_CASE__ : str = output.audios[0]
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE__ : int = 3 * [inputs.pop("prompt" )]
SCREAMING_SNAKE_CASE__ : List[Any] = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE__ : str = audioldm_pipe.tokenizer(
_A, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=_A, return_tensors="pt", )
SCREAMING_SNAKE_CASE__ : int = text_inputs["input_ids"].to(_A )
SCREAMING_SNAKE_CASE__ : Any = audioldm_pipe.text_encoder(
_A, )
SCREAMING_SNAKE_CASE__ : Optional[int] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE__ : List[str] = F.normalize(_A, dim=-1 )
embeds.append(_A )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[int] = embeds
# forward
SCREAMING_SNAKE_CASE__ : Tuple = audioldm_pipe(**_A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict = PNDMScheduler(skip_prk_steps=_A )
SCREAMING_SNAKE_CASE__ : List[str] = AudioLDMPipeline(**_A )
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "egg cracking"
SCREAMING_SNAKE_CASE__ : List[str] = audioldm_pipe(**_A, negative_prompt=_A )
SCREAMING_SNAKE_CASE__ : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 2_5_6
SCREAMING_SNAKE_CASE__ : Any = audio[:1_0]
SCREAMING_SNAKE_CASE__ : int = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def A_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[int] = PNDMScheduler(skip_prk_steps=_A )
SCREAMING_SNAKE_CASE__ : List[Any] = AudioLDMPipeline(**_A )
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : str = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe(_A, num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE__ : Tuple = 2
SCREAMING_SNAKE_CASE__ : int = audioldm_pipe([prompt] * batch_size, num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE__ : int = 2
SCREAMING_SNAKE_CASE__ : Tuple = audioldm_pipe(_A, num_inference_steps=2, num_waveforms_per_prompt=_A ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Tuple = audioldm_pipe(
[prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=_A ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def A_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = AudioLDMPipeline(**_A )
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : Any = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE__ : int = audioldm_pipe(audio_length_in_s=0.016, **_A )
SCREAMING_SNAKE_CASE__ : int = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE__ : str = audioldm_pipe(audio_length_in_s=0.032, **_A )
SCREAMING_SNAKE_CASE__ : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.032
def A_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = AudioLDMPipeline(**_A )
SCREAMING_SNAKE_CASE__ : str = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : Optional[int] = ["hey"]
SCREAMING_SNAKE_CASE__ : str = audioldm_pipe(_A, num_inference_steps=1 )
SCREAMING_SNAKE_CASE__ : str = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE__ : Dict = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
        SCREAMING_SNAKE_CASE__ : Tuple = SpeechT5HifiGan(_A ).to(_A )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audioldm_pipe(_A, num_inference_steps=1 )
SCREAMING_SNAKE_CASE__ : Dict = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def A_ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A )
def A_ ( self : str ) -> int:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_A )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
def A_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
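    # Fixing `latents` from a seeded NumPy RNG (rather than sampling inside the
    # pipeline) keeps these slow tests reproducible across devices.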
def A_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : str = self.get_inputs(_A )
SCREAMING_SNAKE_CASE__ : List[Any] = 2_5
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE__ : List[Any] = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE__ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def A_ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
SCREAMING_SNAKE_CASE__ : Dict = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs(_A )
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE__ : int = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE__ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
SCREAMING_SNAKE_CASE__ : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 705 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_lowerCamelCase : Tuple = datasets.logging.get_logger(__name__)
_lowerCamelCase : Any = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_lowerCamelCase : Optional[int] = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_lowerCamelCase : str = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )
    return doc_coref_infos
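# `doc_coref_infos` maps each document id to a
# (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
# tuple, which is the structure coval's evaluator.evaluate_documents consumes.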
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
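# For reference, `evaluate` returns a flat dict such as
# {"mentions/recall": ..., "muc/f1": ..., "bcub/f1": ..., "ceafe/f1": ...,
#  "lea/f1": ..., "conll_score": ...}; "conll_score" is only added when all
# three of muc, bcub and ceafe were computed.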
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ), codebase_urls=["https://github.com/ns-moosavi/coval"], reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
], )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 157 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
'''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
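# A worked example of the shapes this tester exercises: with image_size=64 and
# downsampling_rates[-1] = 16, create_and_check_model expects a final feature
# map of 64 // (16 * 2) = 2 pixels per side, i.e. (batch, hidden_sizes[-1], 2, 2).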
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =SegformerModelTester(self)
lowerCamelCase__: str =SegformerConfigTester(self , config_class=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
lowerCamelCase__: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCAmelCase_)
@unittest.skip("SegFormer does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : str) ->Tuple:
'''simple docstring'''
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Union[str, Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: Dict =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: int =[*signature.parameters.keys()]
lowerCamelCase__: str =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__: List[Any] =True
for model_class in self.all_model_classes:
lowerCamelCase__: Optional[int] =True
lowerCamelCase__: Dict =False
lowerCamelCase__: Any =True
lowerCamelCase__: str =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Optional[int] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Tuple =outputs.attentions
lowerCamelCase__: List[Any] =sum(self.model_tester.depths)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__: Optional[Any] =True
lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Optional[Any] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: List[Any] =outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# verify the first attentions (first block, first layer)
lowerCamelCase__: Dict =(self.model_tester.image_size // 4) ** 2
lowerCamelCase__: int =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCamelCase__: Any =(self.model_tester.image_size // 32) ** 2
lowerCamelCase__: str =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCamelCase__: int =len(UpperCAmelCase_)
# Check attention is always last and order is fine
lowerCamelCase__: Union[str, Any] =True
lowerCamelCase__: List[Any] =True
lowerCamelCase__: Dict =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Union[str, Any] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
self.assertEqual(out_len + 1 , len(UpperCAmelCase_))
lowerCamelCase__: Any =outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# verify the first attentions (first block, first layer)
lowerCamelCase__: Union[str, Any] =(self.model_tester.image_size // 4) ** 2
lowerCamelCase__: Any =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any]):
lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: str =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Any =outputs.hidden_states
lowerCamelCase__: Any =self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCamelCase__ , lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Tuple =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__: Any =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__: Tuple =True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCAmelCase_):
continue
lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
lowerCamelCase__: Tuple =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =model(**UpperCAmelCase_).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def SCREAMING_SNAKE_CASE_ (self : str) ->Tuple:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Union[str, Any] =SegformerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_)
lowerCamelCase__: Tuple =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
UpperCAmelCase_)
lowerCamelCase__: Tuple =prepare_img()
lowerCamelCase__: Union[str, Any] =image_processor(images=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: List[str] =encoded_inputs.pixel_values.to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Dict =model(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_)
lowerCamelCase__: str =SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(UpperCAmelCase_)
lowerCamelCase__: Tuple =prepare_img()
lowerCamelCase__: Optional[Any] =image_processor(images=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: Tuple =encoded_inputs.pixel_values.to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: List[Any] =model(UpperCAmelCase_)
lowerCamelCase__: Any =torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-1))
@slow
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_)
lowerCamelCase__: str =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
UpperCAmelCase_)
lowerCamelCase__: int =prepare_img()
lowerCamelCase__: Tuple =image_processor(images=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: Any =encoded_inputs.pixel_values.to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_)
lowerCamelCase__: Dict =outputs.logits.detach().cpu()
lowerCamelCase__: Any =image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ , target_sizes=[(500, 300)])
lowerCamelCase__: List[str] =torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , UpperCAmelCase_)
lowerCamelCase__: Tuple =image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_)
lowerCamelCase__: Dict =torch.Size((128, 128))
self.assertEqual(segmentation[0].shape , UpperCAmelCase_)
| 59 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
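# `preprocess` normalizes PIL input to a (N, 3, 256, 256) tensor scaled to
# [-1, 1], which is the dense form `__call__` below hands to `prepare_latents`.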
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    """DDIM pipeline that partially noises an input image and denoises it back,
    for comparing reconstructions at different noise strengths."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # keep only the last `strength` fraction of the schedule; img2img starts
        # from a partially noised image rather than pure noise
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
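# Minimal usage sketch (illustrative; the checkpoint name is an assumption, any
# repo with a compatible unet + scheduler such as "google/ddpm-cifar10-32" should work):
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image, timestep = pipe(image=img, strength=0.5, return_dict=False)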
| 59 | 1 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    # Days of the week indexed by the result of Zeller's congruence
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    # Maps datetime.weekday() (Monday=0) onto Zeller's numbering (Sunday=0)
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
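    # Illustrative check (added): 1 January 2000 fell on a Saturday.
    assert zeller("01-01-2000").endswith("Saturday!")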
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
zeller(args.date_input)
| 243 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
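# Example (illustrative): `python ./utils/get_modified_files.py utils src` with a
# diff touching src/foo.py and README.md prints just "src/foo.py".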
| 243 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 472 |
'''simple docstring'''
def _A ( ):
'''simple docstring'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
                day = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
                day = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
                day = day - days_per_month[month - 2]
if month > 12:
year += 1
            month = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
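    # Hedged check (my addition): 171 is the published answer to Project
    # Euler #19, which this solution targets.
    assert solution() == 171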
| 531 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
    with open(log, '''r''') as f:
        for line in f:
            line = json.loads(line)
            if line.get('''nodeid''', '''''') != "":
                test = line['''nodeid''']
                if line.get('''duration''', None) is not None:
                    duration = F"{line['duration']:.4f}"
                if line.get('''outcome''', '''''') == "failed":
                    section_num_failed += 1
                    failed.append([test, duration, log.name.split('''_''')[0]])
                    total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''''''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('''::''')
                data[0] = data[0].split('''/''')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
        err = '''Too many failed tests, please see the full report in the Action results.'''
        offset = len(err) + 10
        message = message[: 3_000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
    message = '''No failed tests! 🤗'''
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
        date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
        ts = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
else:
                        row[0] = ''''''
                payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 701 |
from math import factorial, pi
def maclaurin_sin( theta , accuracy = 30 ) -> float:
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_sin() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos( theta , accuracy = 30 ) -> float:
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_cos() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
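    # Hedged cross-check (my addition): with the default 30 terms, the truncated
    # series should agree with math.sin/math.cos to well under 1e-6.
    from math import cos, sin
    assert abs(maclaurin_sin(10) - sin(10)) < 1e-6
    assert abs(maclaurin_cos(10) - cos(10)) < 1e-6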
| 170 | 0 |
from typing import Any
class Node :
'''simple docstring'''
    def __init__( self , data) -> None:
        self.data = data
        self.next = None
    def __repr__( self) -> str:
        return f"""Node({self.data})"""
class LinkedList :
'''simple docstring'''
    def __init__( self) -> None:
        self.head = None
    def __iter__( self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self) -> int:
        return sum(1 for _ in self)
    def __repr__( self) -> str:
        return "->".join([str(item) for item in self])
    def __getitem__( self , index) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('''list index out of range.''')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data) -> None:
        if not 0 <= index < len(self):
            raise ValueError('''list index out of range.''')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail( self , data) -> None:
        self.insert_nth(len(self) , data)
    def insert_head( self , data) -> None:
        self.insert_nth(0 , data)
    def insert_nth( self , index , data) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('''list index out of range''')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self) -> None: # print every node data
        print(self)
    def delete_head( self) -> Any:
        return self.delete_nth(0)
    def delete_tail( self) -> Any: # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth( self , index = 0) -> Any:
        if not 0 <= index <= len(self) - 1: # test if index is valid
            raise IndexError('''List index out of range.''')
        delete_node = self.head # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self) -> bool:
        return self.head is None
    def reverse( self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list( ):
    '''simple docstring'''
    linked_list = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
        linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2( ):
    '''simple docstring'''
    test_input = [
-9,
100,
Node(77_345_112 ),
'''dlrow olleH''',
7,
5_555,
0,
-192.55_555,
'''Hello, world!''',
77.9,
Node(10 ),
None,
None,
12.20,
]
    linked_list = LinkedList()
for i in test_input:
        linked_list.insert_tail(i )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
    result = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
    result = linked_list.delete_tail()
assert result == 12.2
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
        str(linked_list )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
    linked_list.insert_tail(None )
assert (
        str(linked_list )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
        str(linked_list )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main( ):
    '''simple docstring'''
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nDelete head''' )
linked_list.delete_head()
print('''Delete tail''' )
linked_list.delete_tail()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nReverse linked list''' )
linked_list.reverse()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nString representation of linked list:''' )
    print(linked_list )
print('''\nReading/changing Node data using indexing:''' )
print(f"""Element at Position 1: {linked_list[1]}""" )
    linked_list[1] = input('''Enter New Value: ''' ).strip()
print('''New list:''' )
    print(linked_list )
print(f"""length of linked_list is : {len(__UpperCAmelCase )}""" )
if __name__ == "__main__":
main()
| 167 | """simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__A = 637_8137.0
__A = 635_6752.31_4245
__A = 6_3_7_8_1_3_7
def lamberts_ellipsoidal_distance( lat_a , lon_a , lat_b , lon_b ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat_a = atan((1 - flattening) * tan(radians(lat_a ) ) )
    b_lat_b = atan((1 - flattening) * tan(radians(lat_b ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat_a , lon_a , lat_b , lon_b ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat_a + b_lat_b) / 2
    q_value = (b_lat_b - b_lat_a) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
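    # Illustrative call (my addition; the coordinates are assumptions and the
    # returned distance in metres is not verified here):
    # lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)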
| 586 | 0 |
def get_highest_set_bit_position( number ):
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
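    # Quick sanity checks (my addition): 8 == 0b1000, so its highest set bit sits
    # at position 4 under this 1-based counting, and 0 has no set bits at all.
    assert get_highest_set_bit_position(8) == 4
    assert get_highest_set_bit_position(0) == 0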
| 717 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=UpperCAmelCase__ , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
        parser.set_defaults(func=env_command )
return parser
def env_command( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""PyTorch XPU available""": str(UpperCAmelCase__ ),
"""PyTorch NPU available""": str(UpperCAmelCase__ ),
"""System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
}
if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
else F'''\t{accelerate_config}'''
)
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
return info
def main( ):
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 650 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule):
"""simple docstring"""
    def __init__( self , model ) -> None:
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def forward( self ) -> None:
        # implemented as a no-op: only the checkpoint weights are needed
        pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model : str ,longformer_question_answering_ckpt_path : str ,pytorch_dump_folder_path : str ) -> None:
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path ,map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 593 |
'''simple docstring'''
def remove_duplicates( key : str ):
    '''simple docstring'''
    key_no_dups = ''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key : str ):
    '''simple docstring'''
    alphabet = [chr(i + 6_5 ) for i in range(2_6 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 2_6 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message : str , cipher_map : dict[str, str] ):
    '''simple docstring'''
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message : str , cipher_map : dict[str, str] ):
    '''simple docstring'''
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main( ):
    '''simple docstring'''
    message = input('Enter message to encode or decode: ' ).strip()
    key = input('Enter keyword: ' ).strip()
    option = input('Encipher or decipher? E/D:' ).strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
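    # Hedged round-trip check (my addition): deciphering an enciphered message
    # with the same generated map should recover the uppercased original.
    _demo_map = create_cipher_map('Secret' )
    assert decipher(encipher('Hello World' , _demo_map ) , _demo_map ) == 'HELLO WORLD'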
main()
| 446 | 0 |
class RadixNode :
"""simple docstring"""
def __init__( self: Dict , __lowerCamelCase: Union[str, Any] = "" , __lowerCamelCase: Any = False ):
'''simple docstring'''
UpperCamelCase__: dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__: int = is_leaf
UpperCamelCase__: Union[str, Any] = prefix
    def match( self , word: str ):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words: list[str] ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self , word: str ):
        '''simple docstring'''
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word: str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word: str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height: int = 0 ):
'''simple docstring'''
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie( ):
    words = """banana bananas bandana band apple all beast""".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests( ):
assert test_trie()
def main( ):
    root = RadixNode()
    words = """banana bananas bandanas bandana band apple all beast""".split()
    root.insert_many(words)
    print("Words:" ,words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
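    # Hedged demo (my addition): exercising find/delete on a small fresh tree;
    # deleting "band" merges it with its child so "bandana" must survive.
    _tree = RadixNode()
    _tree.insert_many(["band", "bandana"])
    assert _tree.find("band") and _tree.find("bandana")
    _tree.delete("band")
    assert not _tree.find("band") and _tree.find("bandana")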
| 721 |
def triangle_number_generator( ):
    for n in range(1 ,1_00_00_00):
        yield n * (n + 1) // 2
def count_divisors( n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution( ):
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_00)
if __name__ == "__main__":
print(solution())
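    # Hedged note (my addition): Project Euler #12's published answer is
    # 76576500, the first triangle number with more than five hundred divisors.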
| 221 | 0 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( SCREAMING_SNAKE_CASE = "https://www.worldometers.info/coronavirus" ) -> dict:
_lowercase : Optional[Any] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE ).text , 'html.parser' )
_lowercase : Dict = soup.findAll('h1' )
_lowercase : Tuple = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 66 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS : str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS : list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS : set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( key , ciphertext ) -> str | None:
    decoded : str = ""
    keychar : int
    cipherchar : int
    decodedchar : int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext ) -> list[str]:
    possibles : list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(key , ciphertext )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles , common_word ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def _SCREAMING_SNAKE_CASE ( a = "p059_cipher.txt" ) -> int:
__A : list[int]
__A : list[str]
__A : str
__A : str
__A : str = Path(a ).parent.joinpath(a ).read_text(encoding='utf-8' )
__A : Union[str, Any] = [int(a ) for number in data.strip().split(',' )]
__A : Any = filter_valid_chars(a )
for common_word in COMMON_WORDS:
__A : Tuple = filter_common_word(a , a )
if len(a ) == 1:
break
__A : Union[str, Any] = possibles[0]
return sum(ord(a ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 239 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaVaConfig( PretrainedConfig ):
    model_type = '''deberta-v2'''
    def __init__( self , vocab_size=128100 , hidden_size=1536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1E-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("""pooler_hidden_size""" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
    @property
    def default_onnx_opset( self ):
        return 12
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
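# Minimal usage sketch (my addition): the defaults above mirror the
# deberta-v2-xlarge checkpoint, so a bare config exposes those sizes directly.
if __name__ == "__main__":
    demo_config = DebertaVaConfig()
    print(demo_config.hidden_size, demo_config.num_hidden_layers)  # 1536 24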
| 481 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 481 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
a_ = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
a_ = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
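# Illustrative usage (my addition; requires network access to the Hugging Face
# Hub, and the exact token ids are from memory rather than the source):
# tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
# tok("hello world")["input_ids"]  # e.g. [101, 7592, 2088, 102]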
| 175 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure ):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def hide_cursor( ) -> None:
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()
def show_cursor( ) -> None:
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()
@contextmanager
def hide( ):
    '''simple docstring'''
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
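if __name__ == "__main__":
    # Hedged demo (my addition): the terminal cursor disappears for one second
    # and is restored even if the body raises, thanks to the finally block.
    import time
    with hide():
        time.sleep(1)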
| 600 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    '''simple docstring'''
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config( model_name , eos_token_id=None ):
    '''simple docstring'''
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
    name , type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
original_model.eval()
print('Done!' )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val
# read in qv biases
read_in_q_v_bias(_lowerCAmelCase , _lowerCAmelCase )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(device )
# create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
with torch.no_grad():
if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase__ : Optional[int] = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=_lowerCAmelCase )
else:
# cast to same type
lowercase__ : Optional[Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCAmelCase ) , _lowerCAmelCase , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
lowercase__ : int = ''
lowercase__ : Dict = tokenizer(_lowerCAmelCase , return_tensors='pt' ).input_ids.to(_lowerCAmelCase )
lowercase__ : int = original_model.generate({'image': original_pixel_values} )
lowercase__ : Optional[int] = hf_model.generate(
_lowerCAmelCase , _lowerCAmelCase , do_sample=_lowerCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowerCAmelCase )
lowercase__ : Optional[Any] = input_ids.shape[1]
lowercase__ : Tuple = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCAmelCase )
lowercase__ : Union[str, Any] = [text.strip() for text in output_text]
print('HF generation:' , _lowerCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
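    # Example invocation (hypothetical script name; flags as defined above):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub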
| 645 | """simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__( self , parent , batch_size=1_3 , image_size=1_0 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=3_7 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='VideoMAE does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_outputs_equivalence( self ):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification( self ):
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 4_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_for_pretraining( self ):
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='pt' ).to(torch_device )
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
        inputs['bool_masked_pos'] = torch.load(local_path )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor(
            [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=torch_device )
        self.assertEqual(outputs.logits.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5_142] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor([0.6_469] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
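# The `@slow` integration tests above are skipped by default; a typical way to run them
# locally (hypothetical test path) is: RUN_SLOW=1 pytest tests/models/videomae/test_modeling_videomae.py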
| 645 | 1 |
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
SCREAMING_SNAKE_CASE = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
SCREAMING_SNAKE_CASE = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 579 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        # NOTE: the two boolean flags below are a best-effort reconstruction, not confirmed by this file
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1 )
            input_image = input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({"image_embeds": None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(input_image, "anime turle", generator=generator, output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image )
    def test_stable_unclip_h_img2img( self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(input_image, "anime turle", generator=generator, output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 413 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
UpperCAmelCase__ : List[Any] = 1.5
UpperCAmelCase__ : str = int(factor * num_class_images )
UpperCAmelCase__ : List[Any] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
UpperCAmelCase__ : str = client.query(text=lowerCAmelCase__ )
if len(lowerCAmelCase__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
UpperCAmelCase__ : Tuple = int(factor * num_images )
UpperCAmelCase__ : Optional[Any] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase__ , aesthetic_weight=0.1 , )
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : Tuple = tqdm(desc='''downloading real regularization images''' , total=lowerCAmelCase__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
UpperCAmelCase__ : List[Any] = class_images[count]
count += 1
try:
UpperCAmelCase__ : Dict = requests.get(images['''url'''] )
if img.status_code == 2_00:
UpperCAmelCase__ : str = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def a__ ( ) -> str:
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser('''''' , add_help=lowerCAmelCase__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=lowerCAmelCase__ , type=lowerCAmelCase__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=lowerCAmelCase__ , type=lowerCAmelCase__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=lowerCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
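# Example invocation (hypothetical script name and values; flags as defined in parse_args above):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_data --num_class_images 200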
| 312 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> bool:
UpperCAmelCase__ : List[Any] = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 312 | 1 |
def prime_sieve_eratosthenes( num : int ):
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p, starting at p * p, as composite
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
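# Quick sanity check: prime_sieve_eratosthenes(10) should return [2, 3, 5, 7].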
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 417 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 2_56
def get_min_hash( tokens : List[str] ):
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code : str ):
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key : Tuple , min_hash : MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator : Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code1 : str , code2 : str ):
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
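# A minimal usage sketch, assuming a `datasets.Dataset` whose rows carry "content",
# "repo_name" and "path" columns (names follow the functions above):
#   ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)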
| 664 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
    with open(log, '''r''') as f:
        for line in f:
            line = json.loads(line)
            if line.get('''nodeid''', '''''') != "":
                test = line['''nodeid''']
                if line.get('''duration''', None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get('''outcome''', '''''') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('''_''')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''''''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('''::''')
                data[0] = data[0].split('''/''')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['''Test Location''', '''Num Failed'''],
                tablefmt=hf_table_format,
                stralign='''right''',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3_0_0_0:
        err = '''Too many failed tests, please see the full report in the Action results.'''
        offset = len(err) + 1_0
        message = message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
    message = '''No failed tests! 🤗'''
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
    if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
    date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
    ts = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''''''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''''''
            payload = {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''mrkdwn''',
                    '''text''': f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
                },
            }
            client.chat_postMessage(
                channel='''#accelerate-ci-daily''',
                thread_ts=ts,
                blocks=[payload],
            ) | 718 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload : bytes , sampling_rate : int ):
    '''
    Decode raw audio bytes to a float32 numpy array by piping them through ffmpeg.
    '''
    ar = f'''{sampling_rate}'''
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ):
'''simple docstring'''
__lowerCAmelCase = f'''{sampling_rate}'''
__lowerCAmelCase = "1"
if format_for_conversion == "s16le":
__lowerCAmelCase = 2
elif format_for_conversion == "f32le":
__lowerCAmelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
__lowerCAmelCase = platform.system()
if system == "Linux":
__lowerCAmelCase = "alsa"
__lowerCAmelCase = "default"
elif system == "Darwin":
__lowerCAmelCase = "avfoundation"
__lowerCAmelCase = ":0"
elif system == "Windows":
__lowerCAmelCase = "dshow"
__lowerCAmelCase = "default"
__lowerCAmelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
__lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live( sampling_rate : int , chunk_length_s : float , stream_chunk_s : Optional[int] = None , stride_length_s : Optional[Union[Tuple[float, float], float]] = None , format_for_conversion : str = "f32le" , ):
    '''
    Like `ffmpeg_microphone`, but yields dictionaries with numpy audio, stride and sampling rate.
    '''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"] , dtype=dtype )
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
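# A minimal usage sketch (assumes a working local ffmpeg install; names as defined above):
#   for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0):
#       handle_chunk(item["raw"])  # `handle_chunk` is a hypothetical consumer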
def chunk_bytes_iter( iterator , chunk_len : int , stride : Tuple[int, int] , stream : bool = False ):
    '''
    Re-chunk a byte iterator into fixed-size chunks that overlap by `stride` on each side.
    '''
    acc = b""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
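# Worked example of the striding above: with chunk_len=10 and stride=(2, 2), each
# yielded chunk is 10 bytes and the accumulator advances by
# chunk_len - stride_left - stride_right = 6 bytes, so consecutive chunks overlap.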
def _ffmpeg_stream( ffmpeg_command , buflen : int ):
    '''
    Run an ffmpeg command and yield its stdout in `buflen`-sized byte chunks.
    '''
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error | 39 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph , v , visited_forward , visited_backward , cst_fwd , cst_bwd , queue , parent , shortest_distance , ):
    # parameter names and order reconstructed from the in-body usages below
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source : str , destination : str , graph_forward : dict , graph_backward : dict ):
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
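# Quick sanity check against the graphs above: the shortest E -> F distance is 3
# (E -> G costs 2, G -> F costs 1), e.g.:
#   print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3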
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum ):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum ):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"
    def __post_init__( self ):
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"
    def __post_init__( self ):
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={"help": "help message"} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__( self ):
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto" , metadata={"help": "help message"} )
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None , metadata={"help": "help message"} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
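# A quick sketch of what the fixtures above exercise (hypothetical values):
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(["--foo", "1", "--bar", "0.5", "--baz", "x"])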
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
            snake_case_ = HfArgumentParser(dataclass_type )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
            snake_case_ = HfArgumentParser(dataclass_type )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def snake_case__( self : str ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
            snake_case_ = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase ) | 39 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Tuple = [randint(-1000, 1000 ) for i in range(10 )]
_SCREAMING_SNAKE_CASE : int = randint(-5000, 5000 )
return (arr, r)
UpperCamelCase__ =make_dataset()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
for triplet in permutations(__lowerCamelCase, 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
return (0, 0, 0)
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
arr.sort()
_SCREAMING_SNAKE_CASE : List[str] = len(__lowerCamelCase )
for i in range(n - 1 ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
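# The two-pointer sweep above works because the array is sorted: when the running
# sum is below the target only advancing `left` can raise it, and when above only
# retreating `right` can lower it. A tiny self-contained check (illustrative only,
# not part of the original module):
def two_pointer_demo():
    arr, target = sorted([8, 3, -1, 5, 2]), 10
    left, right = 1, len(arr) - 1  # scan pairs around the fixed element arr[0]
    while left < right:
        s = arr[0] + arr[left] + arr[right]
        if s == target:
            return (arr[0], arr[left], arr[right])
        left, right = (left + 1, right) if s < target else (left, right - 1)
    return (0, 0, 0)
assert two_pointer_demo() == (-1, 3, 8)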
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Optional[int] = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
_SCREAMING_SNAKE_CASE : Dict = "\ntriplet_sum1(*dataset)\n"
_SCREAMING_SNAKE_CASE : str = "\ntriplet_sum2(*dataset)\n"
_SCREAMING_SNAKE_CASE : Optional[int] = repeat(setup=__lowerCamelCase, stmt=__lowerCamelCase, repeat=5, number=10000 )
_SCREAMING_SNAKE_CASE : Tuple = repeat(setup=__lowerCamelCase, stmt=__lowerCamelCase, repeat=5, number=10000 )
return (min(__lowerCamelCase ), min(__lowerCamelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase__ =solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.") | 381 |
from __future__ import annotations
UpperCamelCase__ =[True] * 100_0001
UpperCamelCase__ =2
while i * i <= 100_0000:
if seive[i]:
for j in range(i * i, 100_0001, i):
UpperCamelCase__ =False
i += 1
def lowerCamelCase__ (__lowerCamelCase ):
return seive[n]
def lowerCamelCase__ (__lowerCamelCase ):
return any(digit in "02468" for digit in str(__lowerCamelCase ) )
def lowerCamelCase__ (__lowerCamelCase = 1000000 ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [2] # result already includes the number 2.
for num in range(3, limit + 1, 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            _SCREAMING_SNAKE_CASE : List[Any] = str(num )
            _SCREAMING_SNAKE_CASE : Optional[Any] = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
return result
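# Illustrative helper (assumed; not part of the module above): the digit rotations
# that the circular-prime check enumerates, e.g. 197 -> [197, 971, 719], all of
# which are prime, so 197 is a circular prime.
def rotations_demo(n):
    s = str(n)
    return [int(s[j:] + s[:j]) for j in range(len(s))]
assert rotations_demo(197) == [197, 971, 719]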
def lowerCamelCase__ ():
return len(find_circular_primes() )
if __name__ == "__main__":
print(f"{len(find_circular_primes()) = }") | 381 | 1 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class A_ :
def __init__( self: Optional[Any] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = str(id_ )
_lowerCamelCase : Tuple = None
_lowerCamelCase : str = None
_lowerCamelCase : List[Any] = []
_lowerCamelCase : Union[str, Any] = {} # {vertex:distance}
def __lt__( self: Optional[int] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return self.key < other.key
def __repr__( self: Optional[Any] ):
'''simple docstring'''
return self.id
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
self.neighbors.append(lowerCAmelCase_ )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = weight
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , A_ )
graph[b - 1].add_edge(graph[a - 1] , A_ )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = []
for u in graph:
_lowerCamelCase : Any = math.inf
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = graph[:]
while q:
_lowerCamelCase : Any = min(A_ )
q.remove(A_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowerCamelCase : Tuple = u
_lowerCamelCase : List[Any] = u.edges[v.id]
for i in range(1 , len(A_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
for u in graph:
_lowerCamelCase : Optional[Any] = math.inf
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = list(A_ )
hq.heapify(A_ )
while h:
_lowerCamelCase : Tuple = hq.heappop(A_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowerCamelCase : str = u
_lowerCamelCase : Union[str, Any] = u.edges[v.id]
hq.heapify(A_ )
for i in range(1 , len(A_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
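# A compact reference version of Prim's algorithm on a plain adjacency map, for
# comparison with the vertex-object variants above (illustrative sketch only; it
# reuses the module's `hq` alias for heapq):
def prim_sketch(adj, start):
    # adj maps node -> [(weight, neighbor), ...] for an undirected weighted graph
    seen, mst = {start}, []
    heap = list(adj[start])
    hq.heapify(heap)
    while heap:
        weight, node = hq.heappop(heap)
        if node in seen:
            continue  # a cheaper edge already reached this node
        seen.add(node)
        mst.append((weight, node))
        for edge in adj[node]:
            hq.heappush(heap, edge)
    return mst
# e.g. prim_sketch({1: [(1, 2), (3, 3)], 2: [(1, 1), (1, 3)], 3: [(3, 1), (1, 2)]}, 1)
# returns [(1, 2), (1, 3)], a minimum spanning tree of total weight 2.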
def lowerCamelCase_( ) -> Any:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 46 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = tempfile.mkdtemp()
# fmt: off
a_ : str = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
a_ : Tuple = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
a_ : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
a_ : Tuple = {"""unk_token""": """<unk>"""}
a_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
a_ : int = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
a_ : int = os.path.join(self.tmpdirname , lowerCAmelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        a_ : Tuple = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = self.get_tokenizer()
a_ : Any = self.get_rust_tokenizer()
a_ : Any = self.get_image_processor()
a_ : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
a_ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase_ )
a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
a_ : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a_ : Any = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
a_ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.get_image_processor()
a_ : List[str] = self.get_tokenizer()
a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : List[str] = self.prepare_image_inputs()
a_ : Dict = image_processor(lowerCAmelCase_ , return_tensors="""np""" )
a_ : List[Any] = processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = self.get_image_processor()
a_ : List[Any] = self.get_tokenizer()
a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : Optional[Any] = """lower newer"""
a_ : List[str] = processor(text=lowerCAmelCase_ )
a_ : Union[str, Any] = tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.get_image_processor()
a_ : Union[str, Any] = self.get_tokenizer()
a_ : Optional[Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : Dict = """lower newer"""
a_ : Optional[int] = self.prepare_image_inputs()
a_ : str = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = self.get_image_processor()
a_ : str = self.get_tokenizer()
a_ : Union[str, Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a_ : int = processor.batch_decode(lowerCAmelCase_ )
a_ : int = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = self.get_image_processor()
a_ : List[str] = self.get_tokenizer()
a_ : int = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
a_ : Dict = """lower newer"""
a_ : Optional[Any] = self.prepare_image_inputs()
a_ : Any = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
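        # End-to-end, the processor under test wraps both components (assumed usage
        # sketch): processor(text="lower newer", images=image, return_tensors="pt")
        # returns a dict with "input_ids", "attention_mask" and "pixel_values", as
        # the key assertions above check.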
| 577 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
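# For example, the patterns above map the TF name
#   "encoder/ffn.dense_1.kernel" -> "encoder.fc2.weight"
# via "/" -> ".", "ffn.dense_1." -> "fc2." and "kernel" -> "weight".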
def _lowercase ( lowercase__ ):
for pegasus_name, hf_name in PATTERNS:
        __lowerCAmelCase : Dict = k.replace(pegasus_name , hf_name )
return k
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : Tuple = DEFAULTS.copy()
cfg_kwargs.update(lowercase__ )
__lowerCAmelCase : List[Any] = PegasusConfig(**lowercase__ )
__lowerCAmelCase : Union[str, Any] = PegasusForConditionalGeneration(lowercase__ )
__lowerCAmelCase : List[str] = torch_model.model.state_dict()
__lowerCAmelCase : Union[str, Any] = {}
for k, v in tf_weights.items():
        __lowerCAmelCase : int = rename_state_dict_key(k )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
__lowerCAmelCase : List[str] = v.T
        __lowerCAmelCase : List[str] = torch.tensor(v , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
__lowerCAmelCase : Dict = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__lowerCAmelCase : Tuple = mapping['''shared.weight''']
__lowerCAmelCase : List[str] = mapping['''shared.weight''']
    __lowerCAmelCase : Union[str, Any] = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**lowercase__ )
__lowerCAmelCase, __lowerCAmelCase : List[str] = torch_model.model.load_state_dict(lowercase__ , strict=lowercase__ )
__lowerCAmelCase : Dict = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def _lowercase ( lowercase__="./ckpt/aeslc/model.ckpt-32000" ):
__lowerCAmelCase : Dict = tf.train.list_variables(lowercase__ )
__lowerCAmelCase : Dict = {}
__lowerCAmelCase : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(lowercase__ , desc='''converting tf checkpoint to dict''' ):
__lowerCAmelCase : Optional[int] = any(pat in name for pat in ignore_name )
if skip_key:
continue
        __lowerCAmelCase : List[Any] = tf.train.load_variable(lowercase__ , name )
__lowerCAmelCase : int = array
return tf_weights
def _lowercase ( lowercase__ , lowercase__ ):
# save tokenizer first
__lowerCAmelCase : Union[str, Any] = Path(lowercase__ ).parent.name
__lowerCAmelCase : Optional[Any] = task_specific_params[f"""summarization_{dataset}"""]['''max_position_embeddings''']
__lowerCAmelCase : List[str] = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=lowercase__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowercase__ )
# convert model
__lowerCAmelCase : List[Any] = get_tf_weights_as_numpy(lowercase__ )
__lowerCAmelCase : int = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
__lowerCAmelCase : str = task_specific_params
__lowerCAmelCase : Dict = convert_pegasus(lowercase__ , lowercase__ )
torch_model.save_pretrained(lowercase__ )
__lowerCAmelCase : Union[str, Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(lowercase__ , Path(lowercase__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
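    # Example invocation of this conversion script (script and path names illustrative):
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc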
| 583 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = FunnelTokenizer
_UpperCamelCase = FunnelTokenizerFast
_UpperCamelCase = True
_UpperCamelCase = True
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
super().setUp()
__lowerCAmelCase : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase__ ( self , **A_ ) ->str:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase__ ( self , **A_ ) ->Optional[int]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase__ ( self , A_ ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
__lowerCAmelCase : Optional[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : int = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
__lowerCAmelCase : List[Any] = tokenizer('''UNwant\u00E9d,running''' )
__lowerCAmelCase : List[str] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
__lowerCAmelCase : Union[str, Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
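        # Note: the Funnel tokenizer marks the [CLS] position with token type 2 rather
        # than 0, which is why the expected token_type_ids above start with [2] before
        # the usual 0/1 segment ids.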
| 583 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """gpt_bigcode"""
__snake_case = ["""past_key_values"""]
__snake_case = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , _lowercase=50257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=50256 , _lowercase=50256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Optional[int]:
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : str = n_positions
_lowerCamelCase : Tuple = n_embd
_lowerCamelCase : Any = n_layer
_lowerCamelCase : Optional[int] = n_head
_lowerCamelCase : Optional[Any] = n_inner
_lowerCamelCase : List[Any] = activation_function
_lowerCamelCase : List[Any] = resid_pdrop
_lowerCamelCase : List[str] = embd_pdrop
_lowerCamelCase : Union[str, Any] = attn_pdrop
_lowerCamelCase : str = layer_norm_epsilon
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Optional[Any] = scale_attn_weights
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : List[str] = attention_softmax_in_fpaa
_lowerCamelCase : List[str] = scale_attention_softmax_in_fpaa
_lowerCamelCase : Dict = multi_query
_lowerCamelCase : Optional[int] = bos_token_id
_lowerCamelCase : Optional[int] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
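    # Usage sketch (assumed; mirrors the standard transformers config pattern). The
    # attribute_map above aliases the canonical names onto the GPT-2 style fields, e.g.
    #     config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=8)
    #     config.hidden_size == config.n_embd == 256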
| 434 | """simple docstring"""
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Tuple:
if height >= 1:
move_tower(height - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
move_disk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
move_tower(height - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->List[str]:
print('''moving disk from''' , SCREAMING_SNAKE_CASE_ , '''to''' , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) ->Optional[Any]:
_lowerCamelCase : Optional[int] = int(input('''Height of hanoi: ''' ).strip() )
move_tower(SCREAMING_SNAKE_CASE_ , '''A''' , '''B''' , '''C''' )
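# Moving a tower of height n takes 2**n - 1 disk moves: each level doubles the work
# of the level below, plus one move of the largest disk. A tiny self-contained check
# of that recurrence (illustrative helper, not part of the original module):
def count_hanoi_moves(height: int) -> int:
    return 0 if height < 1 else 2 * count_hanoi_moves(height - 1) + 1
assert [count_hanoi_moves(h) for h in range(1, 5)] == [1, 3, 7, 15]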
if __name__ == "__main__":
main()
| 434 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : List[Any] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
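    # _LazyModule defers the heavy imports: names listed in the import structure above
    # are only materialized on first attribute access, so e.g. (sketch)
    #     from transformers.models.yolos import YolosImageProcessor
    # does not pull in the torch-backed modeling code until it is actually touched.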
| 496 | '''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : int = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {'vocab_file': 'vocab.json'}
UpperCamelCase__ : Tuple = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
UpperCamelCase__ : List[Any] = {'mgp-str': 27}
class _lowercase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = VOCAB_FILES_NAMES
UpperCAmelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_="[GO]" ,lowerCamelCase_="[GO]" ,lowerCamelCase_="[s]" ,lowerCamelCase_="[GO]" ,**lowerCamelCase_ ) -> str:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
UpperCAmelCase__ : Any = {v: k for k, v in self.vocab.items()}
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return len(self.vocab )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return dict(self.vocab ,**self.added_tokens_encoder )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
for s in text:
            char_tokens.extend(s )
return char_tokens
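        # The method above is character-level tokenization: each input string is spread
        # into its characters (illustratively, "abc" -> ["a", "b", "c"]) before the
        # per-character vocabulary lookup below.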
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> str:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ ,self.vocab.get(self.unk_token ) )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Any:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
UpperCAmelCase__ : Union[str, Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 496 | 1 |
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCAmelCase_ : List[Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
lowerCAmelCase_ : Dict = 'hopper-medium-v2'
lowerCAmelCase_ : Optional[Any] = gym.make(env_name)
lowerCAmelCase_ : List[str] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
lowerCAmelCase_ : str = env.reset()
lowerCAmelCase_ : Union[str, Any] = 0
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : int = 1000
lowerCAmelCase_ : Optional[int] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCAmelCase_ : List[str] = pipeline(obs, planning_horizon=32)
# execute action in environment
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = env.step(denorm_actions)
lowerCAmelCase_ : str = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCAmelCase_ : int = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
| 527 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( snake_case_ , unittest.TestCase ):
_lowerCAmelCase : str = KandinskyInpaintPipeline
_lowerCAmelCase : List[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_lowerCAmelCase : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_lowerCAmelCase : List[str] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_lowerCAmelCase : Union[str, Any] = False
@property
def __lowercase ( self : str ):
"""simple docstring"""
return 32
@property
def __lowercase ( self : str ):
"""simple docstring"""
return 32
@property
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim
@property
def __lowercase ( self : int ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowercase ( self : int ):
"""simple docstring"""
return 1_00
@property
def __lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
SCREAMING_SNAKE_CASE : Any = MultilingualCLIP(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = {
'''in_channels''': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Tuple = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : int = self.dummy_unet
SCREAMING_SNAKE_CASE : str = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ )
# create init_image
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
# create mask
SCREAMING_SNAKE_CASE : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE : str = 0
if str(lowerCAmelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = '''cpu'''
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE : int = output.images
SCREAMING_SNAKE_CASE : int = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Dict = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : List[str] = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self : str ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa )
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Dict = '''a hat'''
SCREAMING_SNAKE_CASE : str = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Dict = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Tuple = pipeline(
lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 527 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase =random.Random()
if is_torch_available():
import torch
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=1.0 , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None ):
'''simple docstring'''
if rng is None:
_UpperCAmelCase : Optional[Any] =global_rng
_UpperCAmelCase : Optional[int] =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=4_0_0 , snake_case=2_0_0_0 , snake_case=1 , snake_case=0.0 , snake_case=1_6_0_0_0 , snake_case=True , snake_case=True , ) -> int:
'''simple docstring'''
_UpperCAmelCase : int =parent
_UpperCAmelCase : Any =batch_size
_UpperCAmelCase : Tuple =min_seq_length
_UpperCAmelCase : Tuple =max_seq_length
_UpperCAmelCase : Optional[Any] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : int =feature_size
_UpperCAmelCase : List[str] =padding_value
_UpperCAmelCase : int =sampling_rate
_UpperCAmelCase : List[str] =return_attention_mask
_UpperCAmelCase : Tuple =do_normalize
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase ( self , snake_case=False , snake_case=False) -> Any:
'''simple docstring'''
def _flatten(snake_case):
return list(itertools.chain(*snake_case))
if equal_length:
_UpperCAmelCase : List[Any] =floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_UpperCAmelCase : Optional[Any] =[
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
            _UpperCAmelCase : Optional[int] =[np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =ASTFeatureExtractor
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =ASTFeatureExtractionTester(self)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_UpperCAmelCase : str =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : str =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
        _UpperCAmelCase : Optional[int] =[np.asarray(speech_input) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : List[str] =feat_extract(speech_inputs[0] , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test batched
_UpperCAmelCase : Tuple =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Dict =[floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_UpperCAmelCase : Tuple =np.asarray(snake_case)
_UpperCAmelCase : Optional[Any] =feat_extract(snake_case , return_tensors='np').input_values
_UpperCAmelCase : Dict =feat_extract(snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
@require_torch
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
import torch
_UpperCAmelCase : Any =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase : int =np.random.rand(1_0_0).astype(np.floataa)
_UpperCAmelCase : str =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : Optional[Any] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
_UpperCAmelCase : List[str] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def lowerCAmelCase ( self , snake_case) -> int:
'''simple docstring'''
from datasets import load_dataset
_UpperCAmelCase : Optional[Any] =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
# automatic decoding with librispeech
        _UpperCAmelCase : int =ds.sort('id').select(range(snake_case))[:snake_case]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
# fmt: off
_UpperCAmelCase : List[str] =torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69])
# fmt: on
_UpperCAmelCase : Dict =self._load_datasamples(1)
_UpperCAmelCase : Optional[Any] =ASTFeatureExtractor()
_UpperCAmelCase : Optional[Any] =feature_extractor(snake_case , return_tensors='pt').input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8))
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , snake_case , atol=1E-4))
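        # Typical inference-time usage sketch (assumed; mirrors the checks above):
        #     features = ASTFeatureExtractor()(waveform, sampling_rate=16_000, return_tensors='pt')
        #     features.input_values.shape  # -> (batch, 1024, 128) log-mel frames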
| 331 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase =[int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : int =os.path.dirname(os.path.realpath(__lowerCamelCase ) )
_UpperCAmelCase : List[Any] =os.path.join(__lowerCamelCase , 'words.txt' )
_UpperCAmelCase : int =''
with open(__lowerCamelCase ) as f:
_UpperCAmelCase : Tuple =f.readline()
_UpperCAmelCase : List[str] =[word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase : Dict =[
word
        for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__lowerCamelCase )
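# Worked example of the letter-value rule above (illustrative): "SKY" scores
# 19 + 11 + 25 = 55, which is the 10th triangular number 0.5 * 10 * 11, so "SKY"
# is a triangle word.
assert sum(ord(ch) - 64 for ch in "SKY") == 55 == int(0.5 * 10 * 11)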
if __name__ == "__main__":
print(solution())
| 331 | 1 |
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any]=False ) -> Dict:
if isinstance(lowercase, lowercase ) and isinstance(lowercase, lowercase ):
A : int =len(set_a.intersection(lowercase ) )
if alternative_union:
A : Tuple =len(lowercase ) + len(lowercase )
else:
A : Any =len(set_a.union(lowercase ) )
return intersection / union
if isinstance(lowercase, (list, tuple) ) and isinstance(lowercase, (list, tuple) ):
A : int =[element for element in set_a if element in set_b]
if alternative_union:
A : Union[str, Any] =len(lowercase ) + len(lowercase )
return len(lowercase ) / union
else:
A : Optional[Any] =set_a + [element for element in set_b if element not in set_a]
return len(lowercase ) / len(lowercase )
return None
if __name__ == "__main__":
_lowercase : str ={'''a''', '''b''', '''c''', '''d''', '''e'''}
_lowercase : List[Any] ={'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
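    # For the sets above: the intersection is {'c', 'd', 'e'} (size 3) and the union
    # has 8 distinct elements, so the intended printed similarity is 3 / 8 = 0.375.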
| 305 | import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Any =get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : int = GPTSwaTokenizer
lowercase : Union[str, Any] = False
lowercase : Dict = True
lowercase : int = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
A : Dict =GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
A : Union[str, Any] ='This is a test'
A : str ='This is a test'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
A : int ='<s>'
A : Optional[Any] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
A : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 20_00 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
A : Any ={'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        # `A` is the expected-encoding dict defined just above (name kept from the file)
        self.tokenizer_integration_test_util(
            expected_encoding=A, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences)
| 305 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
| 85 |
"""simple docstring"""
B64_CHARSET = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def base64_encode(data: bytes) -> bytes:
    """simple docstring"""
    if not isinstance(data, bytes):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data) -> bytes:
    """simple docstring"""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
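    # A short round-trip sanity check of the two functions above (the standard
    # Base64 encoding of b"Hello" is "SGVsbG8="):
    assert base64_encode(b"Hello") == b"SGVsbG8="
    assert base64_decode("SGVsbG8=") == b"Hello"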
| 85 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 175 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    '''simple docstring'''
    def __init__(self, cache_dir=None):
        """simple docstring"""
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path) -> str:
        """simple docstring"""
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path, force_extract) -> bool:
        """simple docstring"""
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path, force_extract=False) -> str:
        """simple docstring"""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    '''simple docstring'''
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        """simple docstring"""
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    '''simple docstring'''
    magic_numbers = []
    @staticmethod
    def read_magic_number(path, magic_number_length):
        """simple docstring"""
        with open(path, '''rb''') as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        """simple docstring"""
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    '''simple docstring'''
    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        """simple docstring"""
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, output_path):
        """simple docstring"""
        def resolved(path) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path, base) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)
        def badlink(info, base) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""")
            else:
                yield finfo
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b"""\x1F\x8B"""]
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        with gzip.open(input_path, '''rb''') as gzip_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [
        b"""PK\x03\x04""",
        b"""PK\x05\x06""",  # empty archive
        b"""PK\x07\x08""",  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        """simple docstring"""
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, '''rb''') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, '''r''') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b"""\xFD\x37\x7A\x58\x5A\x00"""]
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        with lzma.open(input_path) as compressed_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""]  # RAR_ID # RAR5_ID
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        if not config.RARFILE_AVAILABLE:
            raise ImportError('''Please pip install rarfile''')
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b"""\x28\xb5\x2F\xFD"""]
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('''Please pip install zstandard''')
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, '''rb''') as ifh, open(output_path, '''wb''') as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b"""\x42\x5A\x68"""]
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        with bz2.open(input_path, '''rb''') as compressed_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b"""\x37\x7A\xBC\xAF\x27\x1C"""]
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('''Please pip install py7zr''')
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, '''r''') as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b"""\x04\x22\x4D\x18"""]
    @staticmethod
    def extract(input_path, output_path) -> None:
        """simple docstring"""
        if not config.LZ4_AVAILABLE:
            raise ImportError('''Please pip install lz4''')
        import lz4.frame

        with lz4.frame.open(input_path, '''rb''') as compressed_file:
            with open(output_path, '''wb''') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    '''simple docstring'''
    extractors = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        """simple docstring"""
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)
    @staticmethod
    def _read_magic_number(path, magic_number_length):
        """simple docstring"""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path, return_extractor=False) -> bool:
        """simple docstring"""
        warnings.warn(
            '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
            '''Use \'infer_extractor_format\' instead.''', category=FutureWarning,)
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path) -> str:  # <Added version="2.4.0"/>
        """simple docstring"""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated",) -> None:
        """simple docstring"""
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('''.lock'''))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
                        '''Use \'extractor_format\' instead.''', category=FutureWarning,)
                    extractor = extractor if extractor != '''deprecated''' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
                    '''exception in 3.0.0.''', category=FutureWarning,)
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
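# A minimal usage sketch of the module above (hypothetical paths; the format is
# inferred from the file's magic number, e.g. "gzip" for a .tar.gz archive):
# fmt = Extractor.infer_extractor_format("archive.tar.gz")
# if fmt:
#     Extractor.extract("archive.tar.gz", "extracted_dir", extractor_format=fmt)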
| 647 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed')
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open')
issue.remove_from_labels('stale')
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.')
issue.add_to_labels('stale')
if __name__ == "__main__":
main()
| 449 |
'''simple docstring'''
def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('List is empty')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
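    # A hand-checked example for the function above: the mean of [1, 2, 3, 4]
    # is 2.5, so the mean absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
    assert average_absolute_deviation([1, 2, 3, 4]) == 1.0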
| 449 | 1 |
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w, wt, val, n):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution(w, wt, val):
    '''simple docstring'''
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            F'''But got {num_items} weights and {len(val)} values'''
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                F'''type {type(wt[i])} at index {i}'''
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('''optimal_value = ''', optimal_solution)
    print('''An optimal subset corresponding to the optimal value''', optimal_subset)
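    # A quick cross-check added as a sketch: the memory-function variant should
    # agree with the bottom-up table for this instance, both reporting 8.
    assert mf_knapsack(n, wt, val, w) == optimal_solution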
| 330 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
A_ = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output
    def __repr__(self):
        return F'''{self.__class__.__name__} {self.to_json_string()}'''
    def to_json_string(self, use_diff=True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self):
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
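# A minimal usage sketch (hypothetical model name; commented out because it
# would download weights and requires the bitsandbytes package):
# from transformers import AutoModelForCausalLM
# quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
# model = AutoModelForCausalLM.from_pretrained("some/model", quantization_config=quant_config)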
| 143 | 0 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    '''simple docstring'''
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    '''simple docstring'''
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent,))
        return successors
    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
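        # Sanity check (added): the retraced path is a list of (row, column)
        # pairs running from the start cell to the goal cell.
        assert path[0] == init and path[-1] == goal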
| 718 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="""`custom_timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""",):
            scheduler.set_timesteps(timesteps=timesteps)
| 255 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : List[Any] = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = """switch_transformers"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''')
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,)
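# A minimal usage sketch (hedged; field names follow the restored signature above):
# config = SwitchTransformersConfig(num_experts=16, expert_capacity=128)
# config.save_pretrained("./switch-config")  # writes config.json via PretrainedConfig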
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
)
_overwrite_items = [
    _set("""key_a""", """val_a"""),
    _set("""key_a""", """val_b"""),
]
_delete_items = [
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
    _del("""key_a"""),
    _del("""key_b"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
]
_access_absent_items = [
    _get("""key_a"""),
    _del("""key_a"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
    _del("""key_a"""),
    _get("""key_a"""),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("""_""")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 477 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase__ (__lowerCamelCase="" ):
_SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
return os.path.join(__lowerCamelCase, str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    '''simple docstring'''
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1E-4))
    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    '''simple docstring'''
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1E-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Dict = "Hey!"
_SCREAMING_SNAKE_CASE : Union[str, Any] = AgentText(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , agent_type.to_string() )
self.assertEqual(__lowerCamelCase , agent_type.to_raw() )
self.assertEqual(__lowerCamelCase , __lowerCamelCase ) | 720 |
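
# Hedged usage sketch (added for illustration; requires torch and soundfile at
# runtime): the AgentAudio round-trip that the tests above assert, run outside
# the test harness.
def _demo_agent_audio_roundtrip():
    tensor = torch.rand(12, dtype=torch.float64) - 0.5
    path = AgentAudio(tensor).to_string()  # serializes the waveform to a .wav on disk
    return AgentAudio(path).to_raw()  # reloads it as a tensor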
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
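
# Hedged demo (added for illustration; the three-entry vocab is hypothetical):
# WordPiece greedily matches the longest known prefix and marks continuation
# pieces with "##", which is what test_wordpiece_tokenizer above asserts.
def _demo_wordpiece_split():
    vocab = {"[UNK]": 0, "こん": 1, "##ばんは": 2}
    tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    return tokenizer.tokenize("こんばんは")  # -> ["こん", "##ばんは"]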
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
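
# Hedged aside (added for illustration; the tiny shapes are arbitrary): the
# *ModelTester above builds configs/inputs while the *ModelTest classes drive
# the shared checks from TFModelTesterMixin. Exercising the model directly:
def _demo_tf_funnel_forward():
    config = FunnelConfig(vocab_size=99, block_sizes=[1, 1, 2], d_model=32, n_head=4, d_head=8)
    model = TFFunnelModel(config)
    input_ids = tf.constant([[5, 6, 7, 8]])
    return model(input_ids).last_hidden_state  # (batch, seq_len, d_model) == (1, 4, 32)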
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`:"
            f" {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
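
# Hedged aside (added for illustration; the zero "model output" is a stand-in
# for a real noise prediction): the denoising loops tested above reduce to
# sample = scheduler.step(model_output, t, sample).prev_sample per timestep.
def _demo_ddpm_loop() -> torch.Tensor:
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.zeros(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for eps_theta(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample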
def greatest_common_divisor(a: int, b: int) -> int:
    # Recursive Euclid's algorithm: gcd(a, b) == gcd(b % a, a)
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
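
# Hedged aside (added for illustration, not part of the original module):
# Euclid's algorithm repeatedly replaces (x, y) with (y, x % y), e.g.
# gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) => 6.
def _demo_gcd() -> None:
    assert greatest_common_divisor(48, 18) == 6
    assert gcd_by_iterative(48, 18) == 6
    assert gcd_by_iterative(0, 7) == 7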
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a low-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a high-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a band-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates an all-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a peak (bell) biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a low-shelf biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a high-shelf biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
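
# Hedged usage sketch (added; assumes the accompanying IIRFilter exposes a
# per-sample `process(sample: float) -> float` method, which is not shown in
# this file): attenuate content above ~1 kHz at a 48 kHz sample rate.
def _demo_lowpass() -> None:
    lowpass = make_lowpass(1_000, 48_000)
    filtered = [lowpass.process(s) for s in (0.0, 0.5, 1.0, 0.5, 0.0)]
    print(filtered)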
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
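
# Illustrative invocation (added; the script and file names below are
# placeholders for your local paths, not asserted by this file):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type pndm \
#       --dump_path ./stable-diffusion-v1-5-diffusers \
#       --half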
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # initialized so numpy inputs don't hit a NameError below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
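
# Hedged sanity check (added for illustration): slerp follows the great circle
# between its endpoints, so the norm stays ~constant, unlike a straight lerp.
def _demo_slerp() -> None:
    va = np.array([1.0, 0.0])
    vb = np.array([0.0, 1.0])
    mid = slerp(0.5, va, vb)  # ~= [0.7071, 0.7071]
    assert np.allclose(np.linalg.norm(mid), 1.0)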
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)

        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays.
        # It's more optimized to move all timesteps to the correct device beforehand;
        # note that tensor.to() is not in-place, so the move happens in get_timesteps, which receives self.device.
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # Here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 144 | 1 |
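# --- Illustrative sketch (added for clarity, not part of the dataset row) ---
# The pipeline above relies on two small helpers defined elsewhere in the
# original file: `slerp` (spherical interpolation between two embeddings) and
# `spherical_dist_loss` (the CLIP-guidance objective used in cond_fn). A
# minimal, self-contained version of both is sketched here under the
# assumption that the inputs are plain torch tensors; the names match the
# calls in the snippet above, but this is a sketch, not the original code.
import torch
import torch.nn.functional as F


def spherical_dist_loss(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Squared geodesic distance on the unit sphere between normalized embeddings.
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    # Spherical linear interpolation; falls back to lerp for nearly parallel vectors.
    dot = torch.sum(v0 * v1 / (v0.norm() * v1.norm()))
    if abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1
    theta_0 = torch.acos(dot)
    sin_theta_0 = torch.sin(theta_0)
    theta_t = theta_0 * t
    s0 = torch.sin(theta_0 - theta_t) / sin_theta_0
    s1 = torch.sin(theta_t) / sin_theta_0
    return s0 * v0 + s1 * v1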
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum up to `number`,
    using dynamic programming.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 191 |
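# --- Illustrative usage (added; assumes the function defined above) ----------
# By Lagrange's four-square theorem, every natural number is the sum of at
# most four perfect squares, so the DP above should never return more than 4
# for positive input. A quick sanity check:
if __name__ == "__main__":
    assert minimum_squares_to_represent_a_number(12) == 3  # 12 = 4 + 4 + 4
    assert minimum_squares_to_represent_a_number(25) == 1  # 25 = 5**2
    assert all(1 <= minimum_squares_to_represent_a_number(n) <= 4 for n in range(1, 200))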
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise | 191 | 1 |
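# --- Illustrative sketch (added; standalone, not part of the builder above) --
# `_generate_tables` above streams a CSV in chunks through pandas and converts
# each chunk to an Arrow table. The same pattern, stripped of the
# datasets-specific plumbing, looks like this (the helper name is mine):
import io

import pandas as pd
import pyarrow as pa


def iter_csv_as_arrow(csv_path_or_buf, chunksize: int = 10_000):
    reader = pd.read_csv(csv_path_or_buf, iterator=True, chunksize=chunksize)
    for batch_idx, df in enumerate(reader):
        yield batch_idx, pa.Table.from_pandas(df)


if __name__ == "__main__":
    buf = io.StringIO("a,b\n1,x\n2,y\n3,z\n")
    for idx, table in iter_csv_as_arrow(buf, chunksize=2):
        print(idx, table.num_rows)  # prints "0 2" then "1 1"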
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation: (base ** exponent) % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """
    Project Euler 188: return the last `digits` digits of the
    hyperexponentiation (tetration) of `base` by `height`.
    """
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 455 |
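# --- Illustrative check (added; assumes `_modexpt` defined above) ------------
# Python's built-in three-argument pow() computes modular exponentiation, so
# it can serve as a reference implementation for the recursive `_modexpt`.
# (We reduce the result modulo m once more because `_modexpt` returns the
# base unreduced when the exponent is exactly 1.)
if __name__ == "__main__":
    import random as _random

    for _ in range(100):
        b = _random.randint(2, 10**6)
        e = _random.randint(1, 10**6)
        m = _random.randint(2, 10**9)
        assert _modexpt(b, e, m) % m == pow(b, e, m)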
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset of random length.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(observed) < len(reference):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 455 | 1 |
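# --- Illustrative usage (added; mirrors the tests above) ---------------------
# Sharding a BatchSampler across two processes with accelerate's
# BatchSamplerShard: each shard sees every other batch, and with the default
# even_batches=True the last shard wraps around so both shards stay equal.
# Expected output matches the expected values used in the tests above.
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

if __name__ == "__main__":
    batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
    print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
    print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]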
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 711 |
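# --- Illustrative sketch (added; generic version of the launcher above) ------
# The xla_spawn script above follows a common "launcher" pattern: resolve a
# user-provided script path, import it as a module by its stem name, patch
# sys.argv, then hand a well-known entry point (`_mp_fn` by convention) to a
# spawner. A TPU-free sketch of the import step, using only the stdlib:
import importlib
import sys
from pathlib import Path


def load_training_module(training_script: str):
    script_fpath = Path(training_script)
    # Make the script's directory importable, then import it by stem name.
    sys.path.append(str(script_fpath.parent.resolve()))
    return importlib.import_module(script_fpath.stem)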
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 60 | 0 |
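# --- Illustrative sketch (added; the recursion behind the formatter above) ---
# `recursive_tensorize` walks nested dicts/lists and converts the numeric
# leaves, consolidating equal-shaped results into one stacked array. A tiny
# numpy-only version of that walk (no jax or map_nested required):
import numpy as np


def recursive_to_array(data):
    if isinstance(data, dict):
        return {key: recursive_to_array(value) for key, value in data.items()}
    if isinstance(data, (list, tuple)):
        converted = [recursive_to_array(item) for item in data]
        # consolidate: stack when all elements share shape and dtype
        if converted and all(
            isinstance(x, np.ndarray) and x.shape == converted[0].shape and x.dtype == converted[0].dtype
            for x in converted
        ):
            return np.stack(converted, axis=0)
        return converted
    if isinstance(data, (str, bytes, type(None))):
        return data
    return np.asarray(data)


if __name__ == "__main__":
    batch = {"ids": [[1, 2], [3, 4]], "label": ["a", "b"]}
    out = recursive_to_array(batch)
    print(out["ids"].shape)  # (2, 2); "label" stays a list of strings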
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 197 |
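# --- Illustrative usage (added; exercises the two classes defined above) -----
# Constructing the config with overrides and inspecting the ONNX dynamic axes.
# The tiny override values are arbitrary, and the OnnxConfig construction
# below assumes the standard transformers `OnnxConfig(config, task=...)`
# signature.
if __name__ == "__main__":
    config = XLMRobertaXLConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
    print(config.model_type)   # xlm-roberta-xl
    print(config.hidden_size)  # 64

    onnx_config = XLMRobertaXLOnnxConfig(config, task="default")
    print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes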
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the artifact URL redirects to the actual download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path such as tests/models/<model>/test_x.py::test_y."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 38 | 0 |
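# --- Illustrative example (added; the aggregation logic of the script above) -
# `reduce_by_error` is at heart a Counter over the error column of
# (error_line, error, failed_test, job_link) rows. On toy data:
from collections import Counter

if __name__ == "__main__":
    logs = [
        ["t.py:1", "AssertionError", "tests/models/bert/test_a.py::test_x", None],
        ["t.py:2", "AssertionError", "tests/models/bert/test_a.py::test_y", None],
        ["t.py:3", "ImportError", "tests/models/gpt2/test_b.py::test_z", None],
    ]
    counter = Counter(x[1] for x in logs)
    print(counter.most_common())  # [('AssertionError', 2), ('ImportError', 1)]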
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
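# --- Illustrative sketch (added; not part of the original benchmark file) ----
# A minimal demonstration of how `run_with_tf_optimizations` wraps a callable:
# with eager mode the function runs as plain Python; otherwise it is compiled
# with `tf.function` (optionally with XLA). The `_demo_forward` names below
# are hypothetical and exist only for this sketch; nothing here executes at
# import time.
def _demo_forward_factory():
    ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=100)

    @run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
    def _demo_forward():
        # Any TF computation works here; the benchmark wraps a model call.
        return tf.reduce_sum(ids)

    return _demo_forward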
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func( self , model_name , batch_size , sequence_length ) -> Callable[[], None]:
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
        if self.args.fpaa:
            raise NotImplementedError('''Mixed precision is currently not supported.''' )
        has_model_class_in_config = (
            hasattr(config , '''architectures''' )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('''transformers''' , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , '''vocab_size''' ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self , func ) -> float:
        '''simple docstring'''
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
                    logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
                    timeit.repeat(func , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=1_0 , )
                return min(runtimes ) / 1_0.0
            except ResourceExhaustedError as e:
                self.print_fn(F'Doesn\'t fit on GPU. {e}' )
    def _measure_memory( self , func ) -> [Memory, MemorySummary]:
        '''simple docstring'''
        logger.info(
            '''Note that TensorFlow allocates more memory than '''
            '''it might need to speed up computation. '''
            '''The memory reported here corresponds to the memory '''
            '''reported by `nvidia-smi`, which can vary depending '''
            '''on total available memory on the GPU that is used.''' )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
                            ''' consumption line by line.''' )
                    trace = start_memory_tracing('''transformers''' )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
                        ''' with `args.memory=False`''' )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            '''py3nvml not installed, we won\'t log GPU memory usage. '''
                            '''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
                        memory = '''N/A'''
                    else:
                        logger.info(
                            '''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
                            ''' running on the same GPU.''' )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
                            ''' TensorFlow.''' )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'Doesn\'t fit on GPU. {e}' )
                return "N/A", None
| 527 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase_ :
'''simple docstring'''
    def __init__( self , a=2 , b=3 , length=6_4 , seed=None ) -> List[str]:
        '''simple docstring'''
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self ) -> Dict:
'''simple docstring'''
return self.length
def __getitem__( self , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
    def __init__( self , a=0 , b=0 , first_batch=False ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ) -> List[Any]:
        '''simple docstring'''
        if self.first_batch:
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class lowerCamelCase_ ( torch.nn.Module ):
    '''simple docstring'''
    def __init__( self , a=0 , b=0 , first_batch=False ) -> List[str]:
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ) -> str:
        '''simple docstring'''
        if self.first_batch:
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders( accelerator , batch_size = 16 ):
    '''simple docstring'''
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
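# Hypothetical usage sketch (not part of the original test module): the
# dataloaders above are normally wrapped by `accelerator.prepare` before
# training, and the hard-coded MRPC csv paths must exist for this to run.
def _demo_prepare_dataloaders():
    from accelerate import Accelerator

    accelerator = Accelerator()
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)
    return accelerator.prepare(train_dataloader, eval_dataloader)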
| 527 | 1 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_lowercase = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    B"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. """
    B"""\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B"""H\003"""
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
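# Illustrative only (not part of the generated module): the classes built above
# are typically used to inspect a trained SentencePiece model; the default path
# below is a placeholder.
def _demo_read_model(path="spiece.model"):
    m = ModelProto()
    with open(path, "rb") as f:
        m.ParseFromString(f.read())
    return m.trainer_spec.model_type, len(m.pieces)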
| 5 |
def validate_initial_digits( credit_card_number: str ) -> bool:
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def luhn_validation( credit_card_number: str ) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number( credit_card_number: str ) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"{error_message} it fails the Luhn check." )
        return False
    print(f"{credit_card_number} is a valid credit card number." )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
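    # Illustrative check of the doubling rule used in luhn_validation (not part
    # of the original module): for any digit d, the digit sum of 2*d equals
    # 2*d % 10 + 1 whenever 2*d > 9 (e.g. 6 -> 12 -> 1 + 2 = 3).
    for d in range(10):
        doubled = d * 2
        reduced = doubled % 10 + 1 if doubled > 9 else doubled
        assert reduced == sum(int(c) for c in str(doubled))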
| 253 | 0 |
from __future__ import annotations
def longest_subsequence( array: list[int] ) -> list[int]:  # This function is recursive
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
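# Illustrative example (not in the original module): a longest non-decreasing
# subsequence of this classic input has length 6, e.g. [10, 22, 33, 41, 60, 80].
def _demo_longest_subsequence():
    result = longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    assert len(result) == 6
    return result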
if __name__ == "__main__":
import doctest
doctest.testmod()
| 440 |
__author__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation: str ) -> int:
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 440 | 1 |
"""simple docstring"""
import math
import unittest
def is_prime( number: int ):
    '''simple docstring'''
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
        with self.assertRaises(AssertionError ):
            is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
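# Quick illustration (not part of the test suite): the 6k +/- 1 loop in
# `is_prime` only checks divisors up to sqrt(n), so listing small primes is cheap.
def first_primes(limit: int) -> list[int]:
    return [n for n in range(2, limit) if is_prime(n)]
# first_primes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]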
if __name__ == "__main__":
unittest.main()
| 83 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase : str = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_UpperCAmelCase : List[str] = {
"ctrl": 256,
}
_UpperCAmelCase : int = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def get_pairs(word ) -> Any:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class CTRLTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__( self , vocab_file , merges_file , unk_token="<unk>" , **kwargs ):
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
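# Minimal illustration (not part of the tokenizer class): how `get_pairs` feeds
# the BPE merge loop above.
if __name__ == "__main__":
    word = ("h", "e", "l", "l", "o</w>")
    print(get_pairs(word))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}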
| 668 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a__ : Union[str, Any] = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
    mode = 'sequence-classification'
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
def __a ( self , **a ):
return self.model(**a )
def __a ( self , a , a ):
UpperCamelCase__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
UpperCamelCase__ = self(**a )
UpperCamelCase__ = outputs[0]
UpperCamelCase__ = self.trainer.lr_schedulers[0]["scheduler"]
UpperCamelCase__ = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __a ( self ):
UpperCamelCase__ = self.hparams
UpperCamelCase__ = processors[args.task]()
UpperCamelCase__ = processor.get_labels()
for mode in ["train", "dev"]:
UpperCamelCase__ = self._feature_file(a )
if os.path.exists(a ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCamelCase__ = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
UpperCamelCase__ = convert_examples_to_features(
a , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a )
torch.save(a , a )
def __a ( self , a , a , a = False ):
UpperCamelCase__ = "dev" if mode == "test" else mode
UpperCamelCase__ = self._feature_file(a )
logger.info("Loading features from cached file %s" , a )
UpperCamelCase__ = torch.load(a )
UpperCamelCase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
UpperCamelCase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase__ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase__ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a , a , a , a ) , batch_size=a , shuffle=a , )
def __a ( self , a , a ):
UpperCamelCase__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
UpperCamelCase__ = self(**a )
UpperCamelCase__ , UpperCamelCase__ = outputs[:2]
UpperCamelCase__ = logits.detach().cpu().numpy()
UpperCamelCase__ = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __a ( self , a ):
UpperCamelCase__ = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
UpperCamelCase__ = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase__ = np.argmax(a , axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase__ = np.squeeze(a )
UpperCamelCase__ = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a , a )}
UpperCamelCase__ = dict(results.items() )
UpperCamelCase__ = results
return ret, preds_list, out_label_list
def __a ( self , a ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(a )
UpperCamelCase__ = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __a ( self , a ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(a )
UpperCamelCase__ = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __a ( a , a ):
BaseTransformer.add_model_specific_args(a , a )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a , required=a , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def main() -> Optional[int]:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
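# Hypothetical invocation (flags and paths are placeholders, to be checked
# against `add_generic_args` / `add_model_specific_args`):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --do_predict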
| 223 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap( array: list[int] , index1: int , index2: int , direction: int ) -> None:
    '''simple docstring'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1] , array[index2] = array[index2] , array[index1]
def bitonic_merge( array: list[int] , low: int , length: int , direction: int ) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array: list[int] , low: int , length: int , direction: int ) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
a__ : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
a__ : Any = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
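    # Sanity check (illustrative): after the final merge with direction 0 the
    # array is in descending order. Note the bitonic network assumes the input
    # length is a power of two.
    assert unsorted == sorted(unsorted, reverse=True)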
| 223 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
'''simple docstring'''
__lowercase = size if size is not None else {"shortest_edge": 18}
__lowercase = crop_size if crop_size is not None else {"height": 18, "width": 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __a ( __a , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LevitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = LevitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(_lowerCamelCase , "size" ) )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowercase = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowercase = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowercase = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
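# Illustrative stand-alone usage (not part of the test suite above): running a
# default-configured processor on a dummy PIL image; the output shape follows
# the processor's default 224x224 center crop.
def _demo_levit_processor():
    import numpy as np
    from PIL import Image
    from transformers import LevitImageProcessor

    image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    processor = LevitImageProcessor()
    # returns a BatchFeature whose "pixel_values" tensor is (1, 3, 224, 224)
    return processor(image, return_tensors="pt").pixel_values.shape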
| 118 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
__lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
__lowercase = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file , "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , f )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
__lowercase = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class __a ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> List[Any]:
'''simple docstring'''
__lowercase = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id="test-tokenizer" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-tokenizer-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__lowercase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__lowercase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
__lowercase = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class __a ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__lowercase = Trie()
__lowercase = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(_lowerCamelCase , ["AB", "C"] )
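# A minimal sketch of the added-token trie idea exercised above (assumption:
# this mirrors, but is not, the transformers implementation): characters index
# into nested dicts, and "" marks the end of a stored token.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word: str) -> None:
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1

_t = MiniTrie()
_t.add("Hello")
assert _t.data == {"H": {"e": {"l": {"l": {"o": {"": 1}}}}}}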
| 118 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __A ( unittest.TestCase ):
def __init__(self : int , __a : List[Any] , __a : str=7 , __a : int=3 , __a : Optional[int]=18 , __a : Dict=30 , __a : Tuple=400 , __a : Tuple=True , __a : str=None , __a : Any=True , __a : List[str]=None , __a : Dict=True , __a : str=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , __a : List[str]=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , __a : List[str]=True , ):
UpperCAmelCase_ = size if size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_convert_rgb
def _lowercase (self : List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def _lowercase (self : Tuple , __a : str=False , __a : List[str]=False , __a : List[Any]=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
UpperCAmelCase_ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
UpperCAmelCase_ = []
for i in range(self.batch_size ):
UpperCAmelCase_ , UpperCAmelCase_ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
if torchify:
UpperCAmelCase_ = [torch.from_numpy(__a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : str = ChineseCLIPImageProcessor if is_vision_available() else None
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = ChineseCLIPImageProcessingTester(self , do_center_crop=__a )
@property
def _lowercase (self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase (self : int ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "size" ) )
self.assertTrue(hasattr(__a , "do_center_crop" ) )
self.assertTrue(hasattr(__a , "center_crop" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_convert_rgb" ) )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _lowercase (self : Optional[Any] ):
pass
def _lowercase (self : Optional[Any] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
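# Note (added, our reading of the test's intent): the four-channel variant
# still expects three output channels because `do_convert_rgb` converts the
# inputs to RGB before resizing and normalization.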
| 415 | '''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
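# Note (added): `_LazyModule` swaps itself into `sys.modules`, so the names in
# `_import_structure` are imported only on first attribute access. For example,
# `from transformers.models.autoformer import AutoformerModel` triggers the
# torch-dependent import at that point rather than at package import time.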
| 415 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 692 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
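# Illustrative wiring (added, not part of the original script); assumes the
# surrounding training script defines `output_dir`, `metric` and a patience:
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir, metric),
#             get_early_stopping_callback(metric, patience=3),
#         ],
#     )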
| 692 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subparsers = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subparsers, parents=[parent_parser])
    update_command_parser(subparsers, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
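# Illustrative flow (added): a sub-command registered above (e.g. `default` or
# `update`) attaches its handler as `args.func`, so `main()` simply parses the
# command line and dispatches to it.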
if __name__ == "__main__":
main()
| 711 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt substring search in O(len(text) + len(pattern))."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list:
    """Calculates the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
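    # Extra illustration (added): failure[k] is the length of the longest proper
    # prefix of pattern[: k + 1] that is also a suffix of it, so:
    assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]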
| 148 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 7 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 617 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in the number num!."""
    return sum(int(digit) for digit in str(factorial(num)))
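# Worked example (added): solution(10) -> 10! = 3628800 -> 3+6+2+8+8+0+0 = 27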
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 712 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Compute the speed of sound in a fluid as sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
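# Illustrative check (added, values approximate): for water, density is about
# 998 kg/m^3 and bulk modulus about 2.15e9 Pa, so
# speed_of_sound_in_a_fluid(998, 2.15e9) gives roughly 1468 m/s.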
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412 | 0 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8,
                 enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8,
                 dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 238 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date_input in mm-dd-yyyy or mm/dd/yyyy format."""
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
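    # Worked example (added): zeller("01-31-2010") treats January as m=13 of
    # y=2009, giving c=20, k=9, t=28, u=5, v=2, x=40, z=75, w=35 and
    # f = 35 % 7 = 0, i.e. "Sunday".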
| 238 | 1 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 538 | """simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e. our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
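    # Note (added): the update above is plain batch gradient descent on the
    # cross-entropy loss; the gradient x.T @ (h - y) / n follows from
    # differentiating -y*log(h) - (1 - y)*log(1 - h) with h = sigmoid(x @ theta).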
| 538 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
| 657 |
"""simple docstring"""
def decimal_to_fraction(decimal):
    """Return the given decimal as a (numerator, denominator) tuple in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
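# Worked example (added): 6.25 has two fractional digits, so the reduction
# starts from 625/100; the Euclidean loop finds gcd 25 and returns (25, 4).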
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]

        return input_ids
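# Illustrative usage (added); assumes the Hub checkpoint referenced above:
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tokenizer("hello world").input_ids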
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 695 | 1 |