| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak roberta's weights to our BERT structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
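
Once the script has written the dump folder, the converted weights can be loaded back with the standard transformers API. A minimal check, assuming the conversion above succeeded (the path below is a placeholder, not taken from the row):

from transformers import XLMRobertaXLForMaskedLM

model = XLMRobertaXLForMaskedLM.from_pretrained("path/to/pytorch_dump_folder")  # placeholder path
model.eval()  # ready for inference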
| 80 |

class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
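
Tree sort is just an inorder traversal of a binary search tree, so the driver above prints the input in sorted order. A quick cross-check against the built-in sort (hypothetical helper line, not part of the snippet):

assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == sorted([10, 1, 3, 2, 9, 14, 13])  # [1, 2, 3, 9, 10, 13, 14]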
| 520 | 0 |
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 706 |
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 37 | 0 |
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral form of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: count the characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 209 |

from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
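
The heuristic grid built in the driver is a plain Manhattan-distance map toward the goal. For the 5x6 grid above the goal is [4, 5], so for instance:

heuristic[0][0] == abs(0 - 4) + abs(0 - 5)  # == 9

Obstacle cells are then bumped to 99, which makes the greedy expansion steer around them without forbidding them outright.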
| 209 | 1 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    Each state has exactly len(sequence) - index children; it terminates when it
    reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
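
The state-space tree enumerates exactly len(sequence)! permutations, so the first driver call prints 24 lines. A cross-check against the standard library (hypothetical verification code, not in the original):

from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # matches the number of lines printed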
| 714 |
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 433 | 0 |
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 28 |
from __future__ import annotations

from random import random


class Node:
    """
    Treap's node: a binary tree ordered by value and a heap ordered by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
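
Outside the interactive loop the same functions compose directly. A minimal sketch (values chosen arbitrarily for illustration):

root = None
for value in [5, 3, 8, 1]:
    root = insert(root, value)
inorder(root)          # prints 1,3,5,8, -- split/merge keep the tree ordered by value
root = erase(root, 3)
inorder(root)          # prints 1,5,8,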
| 414 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
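
This module follows transformers' lazy-import convention: `_import_structure` maps submodule names to exported symbols, the TYPE_CHECKING branch gives static type checkers real imports, and at runtime the module object is swapped for a `_LazyModule` that resolves attributes on first access. A stripped-down sketch of the same idea (illustrative only, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        # import the defining submodule only when the symbol is first requested
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)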
| 417 |
def solution(limit: int = 1000000) -> int:
    """
    Project Euler 14: return the starting number below `limit` that produces
    the longest Collatz chain, memoizing chain lengths as they are found.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
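
For intuition, the chain starting at 13 runs 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. ten terms, so the memo ends up with counters[13] == 10; any later chain that reaches 13 adds those ten terms in a single dictionary lookup instead of re-walking them. Checking the length directly (hypothetical helper with the same logic as the inner loop):

def collatz_chain_length(n: int) -> int:
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length

assert collatz_chain_length(13) == 10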
| 417 | 1 |
"""simple docstring"""
from ... import PretrainedConfig
UpperCAmelCase = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __magic_name__ ( a_ ):
__A : List[str] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A : int = "nezha"
def __init__( self : Optional[int] , snake_case__ : str=2_1_1_2_8 , snake_case__ : Optional[int]=7_6_8 , snake_case__ : List[Any]=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Any=3_0_7_2 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[Any]=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : Optional[int]=6_4 , snake_case__ : Optional[int]=2 , snake_case__ : int=0.02 , snake_case__ : Optional[int]=1e-1_2 , snake_case__ : str=0.1 , snake_case__ : List[Any]=0 , snake_case__ : Dict=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : int=True , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
lowercase :Any = vocab_size
lowercase :Optional[Any] = hidden_size
lowercase :Tuple = num_hidden_layers
lowercase :str = num_attention_heads
lowercase :Union[str, Any] = hidden_act
lowercase :Optional[int] = intermediate_size
lowercase :Union[str, Any] = hidden_dropout_prob
lowercase :List[str] = attention_probs_dropout_prob
lowercase :List[str] = max_position_embeddings
lowercase :Any = max_relative_position
lowercase :List[Any] = type_vocab_size
lowercase :str = initializer_range
lowercase :Union[str, Any] = layer_norm_eps
lowercase :Optional[Any] = classifier_dropout
lowercase :int = use_cache
| 677 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 367 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 712 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 36 | 0 |
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
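
The manual max_id loop above is the classic way to page through a timeline while staying under the 200-tweets-per-request cap. tweepy also ships a Cursor helper that hides the same bookkeeping; an equivalent sketch, assuming the same authorized api object:

alltweets = []
for status in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
    alltweets.append(status)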
| 55 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
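
A worked instance of the property being searched for: 4150 = 4^5 + 1^5 + 5^5 + 0^5 = 1024 + 1 + 3125 + 0, so 4150 survives the filter, and the precomputed DIGITS_FIFTH_POWER table turns each digit lookup into O(1). Using the helper above:

assert digits_fifth_powers_sum(4150) == 4150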
| 254 | 0 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def A_ ( __lowercase = "https://www.worldometers.info/coronavirus" ):
UpperCamelCase_ : Dict =BeautifulSoup(requests.get(__lowercase ).text , 'html.parser' )
UpperCamelCase_ : List[Any] =soup.findAll('h1' )
UpperCamelCase_ : List[str] =soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowercase , __lowercase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 395 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def A_ ( __lowercase = "https://www.worldometers.info/coronavirus" ):
UpperCamelCase_ : Dict =BeautifulSoup(requests.get(__lowercase ).text , 'html.parser' )
UpperCamelCase_ : List[Any] =soup.findAll('h1' )
UpperCamelCase_ : List[str] =soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowercase , __lowercase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 395 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('T')


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent pointer and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then greedily keep
        # every edge that joins two previously disconnected components
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
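# Minimal usage sketch (editor's example, not part of the original file):
# a triangle graph whose MST keeps the weight-1 and weight-2 edges only.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    print(g.kruskal().connections)  # expected: edges (1, 2) and (2, 3) only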
| 669 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = CLIPTokenizer
__snake_case = CLIPTokenizerFast
__snake_case = True
__snake_case = {}
__snake_case = False
def _snake_case ( self: Union[str, Any] ):
super().setUp()
# fmt: off
__lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
__lowerCamelCase : Tuple = {'unk_token': '<unk>'}
__lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a ) )
def _snake_case ( self: Tuple , **a: Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Union[str, Any] , **a: List[str] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Optional[int] , a: List[Any] ):
__lowerCamelCase : Tuple = 'lower newer'
__lowerCamelCase : Tuple = 'lower newer'
return input_text, output_text
def _snake_case ( self: List[str] ):
__lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase : Optional[Any] = 'lower newer'
__lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
__lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase : int = tokens + [tokenizer.unk_token]
__lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@require_ftfy
def _snake_case ( self: Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
__lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
__lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
__lowerCamelCase : Any = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of space type
__lowerCamelCase : List[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of line break type
__lowerCamelCase : str = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__lowerCamelCase : Dict = tokenizer_s.tokenize(a )
__lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
def _snake_case ( self: List[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
__lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
__lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase : List[Any] = F' {text}'
__lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
__lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
def _snake_case ( self: str ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(a ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def _snake_case ( self: Tuple ):
super().test_tokenization_python_rust_equals()
def _snake_case ( self: Tuple ):
# CLIP always lower cases letters
pass
| 669 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ), )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
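# Example invocation (hypothetical script name and flags, shown for illustration):
#   python xla_spawn.py --num_cores 8 train_mnist.py --batch_size 32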
| 554 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).')
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased', help='The tokenizer to use.')
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.')
    args = parser.parse_args()

    logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`

    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()

    logger.info('Start encoding')
    logger.info(f'{len(data)} examples to process.')
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl')
            start = time.time()

    logger.info('Finished binarization')
    logger.info(f'{len(data)} examples processed.')

    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
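# Example invocation (hypothetical paths, shown for illustration):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text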
| 554 | 1 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels; priority 0 is served first."""

    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The smallest element has the highest priority."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
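# Design note: ElementPriorityQueue.dequeue() scans the whole list with min(), which
# is O(n). A heap-based variant (editor's sketch, not part of the original file)
# gives O(log n) pushes and pops:
#   import heapq
#   heap = []
#   heapq.heappush(heap, 5)
#   heapq.heappop(heap)  # -> 5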
| 354 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : int , snake_case_ : Dict , snake_case_ : List[str]=12 , snake_case_ : Optional[int]=7 , snake_case_ : Optional[Any]=True , snake_case_ : int=True , snake_case_ : int=True , snake_case_ : Any=99 , snake_case_ : List[Any]=32 , snake_case_ : List[Any]=32 , snake_case_ : Any=2 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[Any]=37 , snake_case_ : Tuple=0.1 , snake_case_ : str=0.1 , snake_case_ : Any=5_12 , snake_case_ : Optional[Any]=0.0_2 , snake_case_ : List[Any]=0 , snake_case_ : Union[str, Any]=None , )-> List[Any]:
__lowerCAmelCase =parent
__lowerCAmelCase =batch_size
__lowerCAmelCase =seq_length
__lowerCAmelCase =is_training
__lowerCAmelCase =use_input_mask
__lowerCAmelCase =use_labels
__lowerCAmelCase =vocab_size
__lowerCAmelCase =hidden_size
__lowerCAmelCase =projection_dim
__lowerCAmelCase =num_hidden_layers
__lowerCAmelCase =num_attention_heads
__lowerCAmelCase =intermediate_size
__lowerCAmelCase =dropout
__lowerCAmelCase =attention_dropout
__lowerCAmelCase =max_position_embeddings
__lowerCAmelCase =initializer_range
__lowerCAmelCase =scope
__lowerCAmelCase =bos_token_id
def UpperCamelCase ( self : List[Any])-> Optional[int]:
__lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase =None
if self.use_input_mask:
__lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
__lowerCAmelCase =input_mask.numpy()
__lowerCAmelCase , __lowerCAmelCase =input_mask.shape
__lowerCAmelCase =np.random.randint(1 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(snake_case_):
__lowerCAmelCase =1
__lowerCAmelCase =0
__lowerCAmelCase =self.get_config()
return config, input_ids, tf.convert_to_tensor(snake_case_)
def UpperCamelCase ( self : List[str])-> str:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[Any])-> List[str]:
__lowerCAmelCase =TFBlipTextModel(config=snake_case_)
__lowerCAmelCase =model(snake_case_ , attention_mask=snake_case_ , training=snake_case_)
__lowerCAmelCase =model(snake_case_ , training=snake_case_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCamelCase ( self : Dict)-> Tuple:
__lowerCAmelCase =self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =config_and_inputs
__lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (TFBlipTextModel,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self : Optional[Any])-> Any:
__lowerCAmelCase =BlipTextModelTester(self)
__lowerCAmelCase =ConfigTester(self , config_class=snake_case_ , hidden_size=37)
def UpperCamelCase ( self : List[Any])-> List[str]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[str])-> Dict:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_)
def UpperCamelCase ( self : Optional[Any])-> str:
pass
def UpperCamelCase ( self : List[str])-> int:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""")
def UpperCamelCase ( self : Optional[Any])-> int:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
def UpperCamelCase ( self : List[Any])-> str:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
def UpperCamelCase ( self : Optional[int])-> Tuple:
pass
@slow
def UpperCamelCase ( self : int)-> str:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase =TFBlipTextModel.from_pretrained(snake_case_)
self.assertIsNotNone(snake_case_)
def UpperCamelCase ( self : str , snake_case_ : List[str]=True)-> int:
super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case_)
| 354 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    """Configuration class for a TimeSformer video model."""

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate | 327 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output of `UnCLIPScheduler.step`: the previous sample and the predicted x_0."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """Modified DDPM scheduler used in the UnCLIP pipelines."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples | 327 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
snake_case = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : Optional[int] = None ,_SCREAMING_SNAKE_CASE : int = 5_0_2_5_7 ,_SCREAMING_SNAKE_CASE : int = 1_0_2_4 ,_SCREAMING_SNAKE_CASE : int = 7_6_8 ,_SCREAMING_SNAKE_CASE : int = 1_2 ,_SCREAMING_SNAKE_CASE : int = 1_2 ,_SCREAMING_SNAKE_CASE : Optional[int] = None ,_SCREAMING_SNAKE_CASE : str = "gelu_new" ,_SCREAMING_SNAKE_CASE : float = 0.1 ,_SCREAMING_SNAKE_CASE : float = 0.1 ,_SCREAMING_SNAKE_CASE : float = 0.1 ,_SCREAMING_SNAKE_CASE : float = 1E-5 ,_SCREAMING_SNAKE_CASE : float = 0.02 ,_SCREAMING_SNAKE_CASE : bool = True ,_SCREAMING_SNAKE_CASE : bool = True ,_SCREAMING_SNAKE_CASE : bool = False ,_SCREAMING_SNAKE_CASE : bool = False ,) -> Tuple:
'''simple docstring'''
super().__init__()
A = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
A = prefix_inner_dim
A = prefix_hidden_dim
A = (
nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A = (
nn.Linear(self.prefix_hidden_dim ,_SCREAMING_SNAKE_CASE ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A = GPTaConfig(
vocab_size=_SCREAMING_SNAKE_CASE ,n_positions=_SCREAMING_SNAKE_CASE ,n_embd=_SCREAMING_SNAKE_CASE ,n_layer=_SCREAMING_SNAKE_CASE ,n_head=_SCREAMING_SNAKE_CASE ,n_inner=_SCREAMING_SNAKE_CASE ,activation_function=_SCREAMING_SNAKE_CASE ,resid_pdrop=_SCREAMING_SNAKE_CASE ,embd_pdrop=_SCREAMING_SNAKE_CASE ,attn_pdrop=_SCREAMING_SNAKE_CASE ,layer_norm_epsilon=_SCREAMING_SNAKE_CASE ,initializer_range=_SCREAMING_SNAKE_CASE ,scale_attn_weights=_SCREAMING_SNAKE_CASE ,use_cache=_SCREAMING_SNAKE_CASE ,scale_attn_by_inverse_layer_idx=_SCREAMING_SNAKE_CASE ,reorder_and_upcast_attn=_SCREAMING_SNAKE_CASE ,)
A = GPTaLMHeadModel(_SCREAMING_SNAKE_CASE )
def A( self : Optional[int] ,_SCREAMING_SNAKE_CASE : torch.Tensor ,_SCREAMING_SNAKE_CASE : torch.Tensor ,_SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None ,_SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None ,) -> Tuple:
'''simple docstring'''
A = self.transformer.transformer.wte(_SCREAMING_SNAKE_CASE )
A = self.encode_prefix(_SCREAMING_SNAKE_CASE )
A = self.decode_prefix(_SCREAMING_SNAKE_CASE )
A = torch.cat((prefix_embeds, embedding_text) ,dim=1 )
if labels is not None:
A = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
A = torch.cat((dummy_token, input_ids) ,dim=1 )
A = self.transformer(inputs_embeds=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def A( self : Any ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : torch.device ) -> torch.Tensor:
'''simple docstring'''
return torch.zeros(_SCREAMING_SNAKE_CASE ,self.prefix_length ,dtype=torch.intaa ,device=_SCREAMING_SNAKE_CASE )
def A( self : List[str] ,_SCREAMING_SNAKE_CASE : int ) -> Optional[Any]:
'''simple docstring'''
return self.encode_prefix(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def A( self : List[str] ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : int ,_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
'''simple docstring'''
A = torch.split(_SCREAMING_SNAKE_CASE ,1 ,dim=0 )
A = []
A = []
for feature in features:
A = self.decode_prefix(feature.to(_SCREAMING_SNAKE_CASE ) ) # back to the clip feature
# Only support beam search for now
A , A = self.generate_beam(
input_embeds=_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A = torch.stack(_SCREAMING_SNAKE_CASE )
A = torch.stack(_SCREAMING_SNAKE_CASE )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def A( self : Dict ,_SCREAMING_SNAKE_CASE : List[str]=None ,_SCREAMING_SNAKE_CASE : str=None ,_SCREAMING_SNAKE_CASE : Any=None ,_SCREAMING_SNAKE_CASE : int = 5 ,_SCREAMING_SNAKE_CASE : int = 6_7 ,_SCREAMING_SNAKE_CASE : float = 1.0 ,_SCREAMING_SNAKE_CASE : Optional[int] = None ,) -> str:
'''simple docstring'''
A = eos_token_id
A = None
A = None
A = torch.ones(_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.int )
A = torch.zeros(_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.bool )
if input_embeds is not None:
A = input_embeds
else:
A = self.transformer.transformer.wte(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
A = self.transformer(inputs_embeds=_SCREAMING_SNAKE_CASE )
A = outputs.logits
A = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A = logits.softmax(-1 ).log()
if scores is None:
A , A = logits.topk(_SCREAMING_SNAKE_CASE ,-1 )
A = generated.expand(_SCREAMING_SNAKE_CASE ,*generated.shape[1:] )
A , A = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
if tokens is None:
A = next_tokens
else:
A = tokens.expand(_SCREAMING_SNAKE_CASE ,*tokens.shape[1:] )
A = torch.cat((tokens, next_tokens) ,dim=1 )
else:
A = -float(np.inf )
A = 0
A = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A = scores_sum / seq_lengths[:, None]
A , A = scores_sum_average.view(-1 ).topk(_SCREAMING_SNAKE_CASE ,-1 )
A = next_tokens // scores_sum.shape[1]
A = seq_lengths[next_tokens_source]
A = next_tokens % scores_sum.shape[1]
A = next_tokens.unsqueeze(1 )
A = tokens[next_tokens_source]
A = torch.cat((tokens, next_tokens) ,dim=1 )
A = generated[next_tokens_source]
A = scores_sum_average * seq_lengths
A = is_stopped[next_tokens_source]
A = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
A = torch.cat((generated, next_token_embed) ,dim=1 )
A = is_stopped + next_tokens.eq(_SCREAMING_SNAKE_CASE ).squeeze()
if is_stopped.all():
break
A = scores / seq_lengths
A = scores.argsort(descending=_SCREAMING_SNAKE_CASE )
# tokens tensors are already padded to max_seq_length
A = [tokens[i] for i in order]
A = torch.stack(_SCREAMING_SNAKE_CASE ,dim=0 )
A = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 110 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 110 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into one processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
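# Minimal usage sketch (assumes instantiated Tvlt components; inputs are hypothetical):
# processor = TvltProcessor(image_processor, feature_extractor)
# inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)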
| 554 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_=7 )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = None
if token is not None:
UpperCamelCase = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
# The id of a workflow (not of a workflow run)
UpperCamelCase = "636036"
UpperCamelCase = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
UpperCamelCase = requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json()
return result["workflow_runs"]
def lowerCamelCase__ ( UpperCAmelCase_ )-> Any:
"""simple docstring"""
UpperCamelCase = get_daily_ci_runs(UpperCAmelCase_ )
UpperCamelCase = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
UpperCamelCase = workflow_run["id"]
break
return workflow_run_id
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> int:
"""simple docstring"""
UpperCamelCase = get_last_daily_ci_runs(UpperCAmelCase_ )
if workflow_run_id is not None:
UpperCamelCase = get_artifacts_links(worflow_run_id=UpperCAmelCase_ , token=UpperCAmelCase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
UpperCamelCase = artifacts_links[artifact_name]
download_artifact(
artifact_name=UpperCAmelCase_ , artifact_url=UpperCAmelCase_ , output_dir=UpperCAmelCase_ , token=UpperCAmelCase_ )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
get_last_daily_ci_artifacts(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = {}
for artifact_name in artifact_names:
UpperCamelCase = os.path.join(UpperCAmelCase_ , F"{artifact_name}.zip" )
if os.path.isfile(UpperCAmelCase_ ):
UpperCamelCase = {}
with zipfile.ZipFile(UpperCAmelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase_ ):
# read the file
with z.open(UpperCAmelCase_ ) as f:
UpperCamelCase = f.read().decode("UTF-8" )
return results
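# Example usage sketch (hypothetical artifact name; requires a GitHub token):
# reports = get_last_daily_ci_reports(
#     artifact_names=["ci_results"], output_dir="ci_artifacts", token=os.environ["GH_TOKEN"]
# )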
| 554 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    """Configuration class for a ViT model."""

    model_type = 'vit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 285 | 0 |
import math
def lowercase ( SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( SCREAMING_SNAKE_CASE = 1_00_01 ) -> int:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE_ = int(SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 2
while len(SCREAMING_SNAKE_CASE ) < nth:
if is_prime(SCREAMING_SNAKE_CASE ):
primes.append(SCREAMING_SNAKE_CASE )
num += 1
else:
num += 1
return primes[len(SCREAMING_SNAKE_CASE ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 205 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its serialized state disagrees with
        # the arguments passed in (`normalizers` is the `tokenizers` submodule
        # imported at the top of the file).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
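# Editorial usage sketch (not part of the original module); the token ids are
# the ones the "bert-base-uncased" vocabulary listed above should produce:
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tokenizer("hello world").input_ids  # [101, 7592, 2088, 102] = [CLS] hello world [SEP]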
| 205 | 1 |
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # finished_process[i] is 1 once process i has completed, 0 while it is
    # still waiting to run.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
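# Editorial note (not in the original source): the selection rule in
# calculate_turn_around_time is Highest Response Ratio Next, which picks the
# ready process maximizing (waiting_time + burst_time) / burst_time. A job
# that has waited 6 units and needs 2 has ratio (6 + 2) / 2 = 4.0, so it beats
# a job that just arrived (ratio 1.0); this keeps long waiters from starving.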
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""") | 370 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
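    # Editorial usage sketch (not part of the original file):
    #   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    #   inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")
    # `inputs` then holds input_ids/attention_mask from the tokenizer plus
    # pixel_values/pixel_mask from the image processor, merged by the
    # `encoding.update(...)` call above.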
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 370 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting the framework-dependent parameters needs to be gated behind the TF availability check
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)
            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()
        # A fake BART whose call() accepts an extra "foo" kwarg: signature
        # filtering should silently drop it before the encoder is called.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)
        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))
        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)
        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 493 |
import math
def prime_sieve(n):
    """Returns all primes below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit=999_966_663_333):
    """Sums the semidivisible numbers not exceeding the limit (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
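# Editorial note (not in the original source): a number n is "semidivisible"
# when it is divisible by exactly one of lps(n) and ups(n), the largest prime
# <= sqrt(n) and the smallest prime >= sqrt(n). For 10 (sqrt ~ 3.16) and 15
# (sqrt ~ 3.87) the bracketing primes are 3 and 5: 10 is divisible by 5 only
# and counts, while 15 is divisible by both and does not.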
if __name__ == "__main__":
print(solution())
| 493 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
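# Editorial note (not in the original file): the sys.modules swap above keeps
# importing this package cheap; the torch-backed classes listed in
# _import_structure are only imported on first attribute access, e.g.
#   from transformers import TableTransformerModel  # triggers the real import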
| 421 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 421 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
| 347 | '''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
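# Editorial note (not in the original source): this is plain backtracking.
# Each recursion level fixes one position, so a sequence of length n prints
# all n! permutations; the [3, 1, 2, 4] example below yields 4! = 24 lines.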
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 274 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculates the Easter date for a given year, following Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
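# Editorial worked example (not in the original source), for year = 2024:
# metonic_cycle = 10, julian_leap_year = 0, non_leap_year = 1,
# days_to_add = (19 * 10 + 24) % 30 = 4, days_from_phm_to_sunday = 33 % 7 = 5,
# so Easter = March 22 + 4 + 5 = March 31, 2024.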
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 609 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a : Optional[Any] = "<<<<<<< This should probably be modified because it mentions: "
a : List[Any] = "=======\n>>>>>>>\n"
a : Union[str, Any] = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def convert_command_factory(args: Namespace):
    """Factory function used to convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 609 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"
    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 21 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter
        def process_string(self, s: str):
            return list(s)
        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
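# Editorial worked example (not in the original file): for the prediction
# "hllo" against the reference "hello", the alignment has one deletion over
# 5 reference characters, so CER = (S + D + I) / N = (0 + 1 + 0) / 5 = 0.2.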
| 207 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
| 124 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
def A_ ( self : int ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__snake_case : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : List[Any] = bbox[i, j, 3]
__snake_case : Optional[int] = bbox[i, j, 1]
__snake_case : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : int = bbox[i, j, 2]
__snake_case : Any = bbox[i, j, 0]
__snake_case : Any = t
__snake_case : Any = tf.convert_to_tensor(__a )
__snake_case : Optional[Any] = None
if self.use_input_mask:
__snake_case : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : str = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Any = None
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[str] = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] , __a : int , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : int ) -> Dict:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMModel(config=__a )
__snake_case : Union[str, Any] = model(__a , __a , attention_mask=__a , token_type_ids=__a )
__snake_case : str = model(__a , __a , token_type_ids=__a )
__snake_case : List[str] = model(__a , __a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : Dict , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : str , __a : List[Any] , __a : List[str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMForMaskedLM(config=__a )
__snake_case : Union[str, Any] = model(__a , __a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : List[str] , __a : Any , __a : Dict , __a : List[str] , __a : Optional[Any] , __a : Dict , __a : str , __a : Optional[int] , __a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : str = TFLayoutLMForSequenceClassification(config=__a )
__snake_case : Any = model(__a , __a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Union[str, Any] , __a : List[Any] , __a : Optional[Any] , __a : List[Any] , __a : Optional[int] , __a : List[Any] , __a : Any , __a : Union[str, Any] , __a : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = self.num_labels
__snake_case : Union[str, Any] = TFLayoutLMForTokenClassification(config=__a )
__snake_case : str = model(__a , __a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Tuple , __a : Optional[Any] , __a : List[str] , __a : str , __a : str , __a : Optional[int] , __a : Tuple , __a : Any , __a : List[str] ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMForQuestionAnswering(config=__a )
__snake_case : Optional[Any] = model(__a , __a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : str ) -> str:
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
__snake_case : int = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
A__ = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ = False
A__ = True
A__ = 10
def A_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMModelTester(self )
__snake_case : List[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def A_ ( self : Dict ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
@slow
def A_ ( self : List[str] ) -> int:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[int] = TFLayoutLMModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
pass
def a_ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__snake_case : Optional[Any] = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
__snake_case : List[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__snake_case : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
__snake_case : Union[str, Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__snake_case : int = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case__ ( unittest.TestCase ):
@slow
def A_ ( self : Dict ) -> Tuple:
'''simple docstring'''
__snake_case : str = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : List[str] = model(input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a )
# test the sequence output on [0, :3, :3]
__snake_case : Tuple = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-3 ) )
# test the pooled output on [1, :3]
__snake_case : Any = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __a , atol=1e-3 ) )
@slow
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
# initialize model with randomly initialized sequence classification head
__snake_case : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : int = model(
input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__snake_case : Optional[int] = outputs.loss
__snake_case : Tuple = (2,)
self.assertEqual(loss.shape , __a )
# test the shape of the logits
__snake_case : Optional[int] = outputs.logits
__snake_case : str = (2, 2)
self.assertEqual(logits.shape , __a )
@slow
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
# initialize model with randomly initialized token classification head
__snake_case : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : List[str] = model(
input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a , labels=__a )
# test the shape of the logits
__snake_case : Any = outputs.logits
__snake_case : Optional[int] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , __a )
@slow
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
# initialize model with randomly initialized token classification head
__snake_case : List[str] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : Optional[Any] = model(input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a )
# test the shape of the logits
__snake_case : Optional[Any] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , __a )
self.assertEqual(outputs.end_logits.shape , __a )
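# Hedged invocation note: as with other transformers test modules, the @slow
# integration tests above only run when opted in, e.g.
#   RUN_SLOW=1 pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py
# (the exact file path is an assumption based on the upstream repo layout).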
| 124 | 1 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCamelCase ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
__A : int = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def A__ ( ) -> Union[str, Any]:
"""simple docstring"""
if os.name == "nt":
_UpperCAmelCase = CursorInfo()
_UpperCAmelCase = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
_UpperCAmelCase = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def A__ ( ) -> List[Any]:
"""simple docstring"""
if os.name == "nt":
_UpperCAmelCase = CursorInfo()
_UpperCAmelCase = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
_UpperCAmelCase = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
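# Hedged usage sketch: upstream these two helpers are typically named
# ``hide_cursor`` / ``show_cursor`` and are paired by the ``hide`` context
# manager defined below (the obfuscated ``A__`` names here collide, so those
# names are assumptions). Typical call site:
#
#   with hide():
#       run_long_task()  # cursor hidden; restored afterwards, even on error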
@contextmanager
def A__ ( ) -> str:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor() | 32 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
def lowercase__ ( lowerCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , np.ndarray ):
return list(tensor.shape )
a__ : Optional[int] = tf.shape(lowerCAmelCase__ )
if tensor.shape == tf.TensorShape(lowerCAmelCase__ ):
return dynamic
a__ : Union[str, Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowerCAmelCase__ )]
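# Hedged example for the shape helper above: given a symbolic tensor of shape
# (None, 128), it returns [<scalar tensor for the batch dim>, 128], i.e.
# statically known dims stay Python ints and unknown dims fall back to tf.shape.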
def lowercase__ ( lowerCAmelCase__ : tf.Tensor , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[str] = None ) -> tf.Tensor:
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1E-9 , axis=lowerCAmelCase__ , name=lowerCAmelCase__ )
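# Design note (hedged): the tiny 1e-9 shift added to the logits is a
# workaround for a known TensorFlow issue where tf.nn.softmax misbehaves
# under XLA on some backends; it perturbs the probabilities negligibly
# while keeping the op compilable.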
def lowercase__ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=1E-5 , lowerCAmelCase__ : str=-1 ) -> int:
'''simple docstring'''
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
a__ , a__ : Dict = tf.nn.moments(lowerCAmelCase__ , axes=[axis] , keepdims=lowerCAmelCase__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
a__ : Dict = [1] * inputs.shape.rank
a__ : Dict = shape_list(lowerCAmelCase__ )[axis]
a__ : Any = tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[Any] = tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
# Compute layer normalization using the batch_normalization
# function.
a__ : List[str] = tf.nn.batch_normalization(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , offset=lowerCAmelCase__ , scale=lowerCAmelCase__ , variance_epsilon=lowerCAmelCase__ , )
return outputs
def lowercase__ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : List[str]=-1 ) -> List[str]:
'''simple docstring'''
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
a__ : Optional[int] = tf.shape(lowerCAmelCase__ )
a__ : str = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
a__ : Union[str, Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
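# Hedged example: flattening a tensor of shape (2, 3, 4, 5) with start_dim=1
# and end_dim=2 yields shape (2, 12, 5), mirroring torch.flatten semantics.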
def lowercase__ ( lowerCAmelCase__ : tf.Tensor ) -> tf.Tensor:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , tf.Tensor ):
a__ : Optional[Any] = tf.convert_to_tensor(lowerCAmelCase__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
a__ : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
a__ : int = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
a__ : int = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
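# The returned mask is additive: attended positions contribute 0 and masked
# positions contribute the dtype's most negative value, so it can be summed
# onto raw attention scores just before the softmax.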
def lowercase__ ( lowerCAmelCase__ : tf.Tensor , lowerCAmelCase__ : int , lowerCAmelCase__ : str = "input_ids" ) -> None:
'''simple docstring'''
tf.debugging.assert_less(
lowerCAmelCase__ , tf.cast(lowerCAmelCase__ , dtype=tensor.dtype ) , message=(
F"The maximum value of {tensor_name} ({tf.math.reduce_max(lowerCAmelCase__ )}) must be smaller than the embedding "
F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def lowercase__ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = 6_4_5_1_2
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
a__ : Optional[Any] = [x for x in data if len(lowerCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
F"bytes: {bad_attributes}" )
a__ : List[str] = np.asarray(lowerCAmelCase__ )
a__ : List[str] = 1
a__ : str = np.array_split(lowerCAmelCase__ , lowerCAmelCase__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
a__ : str = np.array_split(lowerCAmelCase__ , lowerCAmelCase__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowerCAmelCase__ ):
a__ : List[Any] = chunk_data
else:
a__ : List[str] = data
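# Hedged example: a list of layer names whose encoded size exceeds the 64512
# byte HDF5 header limit is stored as "name0", "name1", ... attributes, and
# the paired loader below reassembles the chunks by probing "%s%d" keys.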
def lowercase__ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
if name in group.attrs:
a__ : str = [n.decode("utf8" ) if hasattr(lowerCAmelCase__ , "decode" ) else n for n in group.attrs[name]]
else:
a__ : str = []
a__ : Any = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(lowerCAmelCase__ , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def lowercase__ ( lowerCAmelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
def _expand_single_ad_tensor(lowerCAmelCase__ : List[str] ):
if isinstance(lowerCAmelCase__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowerCAmelCase__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowerCAmelCase__ ) | 642 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case(nums: list[int]) -> int:
    """
    Return the maximum sum of non-adjacent elements of ``nums``.

    >>> snake_case([3, 2, 7, 10])
    13
    >>> snake_case([3, 2, 5, 10, 7])
    15
    >>> snake_case([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 463 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase_ (__A ):
def __init__( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Tuple ) -> Dict:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = config_class
UpperCAmelCase_ : List[str] = has_text_modality
UpperCAmelCase_ : Tuple = kwargs
UpperCAmelCase_ : int = common_properties
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.config_class(**self.inputs_dict )
UpperCAmelCase_ : int = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(lowerCAmelCase_ ):
try:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.parent.assertEqual(
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , msg=f"""`{name} value {idx} expected, but was {getattr(lowerCAmelCase_ , lowerCAmelCase_ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(lowerCAmelCase_ ):
try:
UpperCAmelCase_ : Optional[Any] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , msg=f"""`{name} value {idx} expected, but was {getattr(lowerCAmelCase_ , lowerCAmelCase_ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.config_class(**self.inputs_dict )
UpperCAmelCase_ : List[str] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : List[str] = os.path.join(lowerCAmelCase_ , "config.json" )
config_first.to_json_file(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.config_class.from_json_file(lowerCAmelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = self.config_class.from_pretrained(lowerCAmelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict )
UpperCAmelCase_ : int = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Optional[int] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
config_first.save_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.config_class.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
UpperCAmelCase_ : List[Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
if self.config_class.is_composition:
return
UpperCAmelCase_ : str = self.config_class()
self.parent.assertIsNotNone(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = copy.deepcopy(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.config_class(**lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(lowerCAmelCase_ , lowerCAmelCase_ ) != value:
wrong_values.append((key, getattr(lowerCAmelCase_ , lowerCAmelCase_ ), value) )
if len(lowerCAmelCase_ ) > 0:
UpperCAmelCase_ : Any = "\n".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
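# Hedged usage sketch: a model's config test typically instantiates this
# helper from its own unittest case (names follow the upstream transformers
# tests; the obfuscated class above plays the role of ConfigTester):
#
#   tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#   tester.run_common_tests()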
| 463 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
A = True
from torch.cuda.amp import autocast
A = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__A = field(
default=__snake_case , metadata={"""help""": """Whether to log verbose messages or not."""} , )
__A = field(
default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} )
__A = field(
default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} )
__A = field(
default=0.99_99_95 , metadata={"""help""": """Decay of gumbel temperature during training."""} )
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case_ = logging.WARNING
if model_args.verbose_logging:
snake_case_ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
snake_case_ = logging.INFO
logger.setLevel(lowercase__ )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default=__snake_case , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
__A = field(
default=__snake_case , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__A = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__A = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
__A = field(
default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__A = field(
default=1 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
__A = field(
default=__snake_case , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__A = field(
default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = 42
__A = 42
__A = "longest"
__A = None
__A = None
def __call__( self , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.feature_extractor.pad(
__UpperCamelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
snake_case_ = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
snake_case_ = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
snake_case_ = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
snake_case_ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
snake_case_ = 1
snake_case_ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
snake_case_ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__UpperCamelCase , min_masks=2 , )
return batch
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
def __init__( self , *__UpperCamelCase , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=1.0 , **__UpperCamelCase ):
"""simple docstring"""
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
snake_case_ = 0
snake_case_ = max_gumbel_temp
snake_case_ = min_gumbel_temp
snake_case_ = gumbel_temp_decay
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
model.train()
snake_case_ = self._prepare_inputs(__UpperCamelCase )
if self.use_amp:
with autocast():
snake_case_ = self.compute_loss(__UpperCamelCase , __UpperCamelCase )
else:
snake_case_ = self.compute_loss(__UpperCamelCase , __UpperCamelCase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
snake_case_ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
snake_case_ = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
snake_case_ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__UpperCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(__UpperCamelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__UpperCamelCase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
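# For reference, the annealing implemented above: the gumbel-softmax
# temperature decays exponentially per update step and is clipped from below,
#   temp_t = max(max_gumbel_temp * gumbel_temp_decay ** t, min_gumbel_temp)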
def a():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
configure_logger(lowercase__ , lowercase__ )
# Downloading and loading a dataset from the hub.
snake_case_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
snake_case_ = DatasetDict()
snake_case_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
snake_case_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
snake_case_ = DatasetDict()
snake_case_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
snake_case_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowercase__ )
def prepare_dataset(lowercase__ ):
# check that all files have the correct sampling rate
snake_case_ , snake_case_ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
snake_case_ = datasets.map(
lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
snake_case_ = vectorized_datasets.filter(
lambda lowercase__ : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(lowercase__ ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
snake_case_ = vectorized_datasets.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
snake_case_ = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
            'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
            ' ``config.feat_extract_norm=\'layer\'``' )
snake_case_ = WavaVecaForPreTraining(lowercase__ )
snake_case_ = DataCollatorForWavaVecaPretraining(model=lowercase__ , feature_extractor=lowercase__ )
snake_case_ = WavaVecaPreTrainer(
model=lowercase__ , data_collator=lowercase__ , args=lowercase__ , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=lowercase__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
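# Hedged launch sketch: the flags mirror the dataclass fields above, but the
# script filename and the dataset/checkpoint identifiers are placeholders:
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path <wav2vec2-base checkpoint> \
#       --dataset_name <hub dataset> --dataset_config_name <config> \
#       --output_dir ./wav2vec2-pretrained --do_train --fp16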
| 187 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure)
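# Design note: the try/except around ``is_torch_available`` lets the package
# import cleanly when torch is missing, and ``_LazyModule`` defers the heavy
# model imports until an attribute is first accessed, e.g.
#   from transformers.models.mra import MraModel   # resolved lazily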
| 187 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> YolosConfig:
"""simple docstring"""
__lowerCamelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__lowerCamelCase = 192
__lowerCamelCase = 768
__lowerCamelCase = 12
__lowerCamelCase = 3
__lowerCamelCase = [800, 1333]
__lowerCamelCase = False
elif yolos_name == "yolos_s_dWr":
__lowerCamelCase = 330
__lowerCamelCase = 14
__lowerCamelCase = 6
__lowerCamelCase = 1320
elif "yolos_s" in yolos_name:
__lowerCamelCase = 384
__lowerCamelCase = 1536
__lowerCamelCase = 12
__lowerCamelCase = 6
elif "yolos_b" in yolos_name:
__lowerCamelCase = [800, 1344]
__lowerCamelCase = 91
__lowerCamelCase = 'huggingface/label-files'
__lowerCamelCase = 'coco-detection-id2label.json'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( UpperCamelCase__ : dict , UpperCamelCase__ : YolosConfig , UpperCamelCase__ : bool = False ) -> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCamelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[-config.hidden_size :, :]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
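# For reference, the fused timm ``qkv`` matrix above is split row-wise into
# equal thirds: rows [0, h) -> query, [h, 2h) -> key, [2h, 3h) -> value,
# where h = config.hidden_size.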
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> str:
"""simple docstring"""
if "backbone" in name:
__lowerCamelCase = name.replace('backbone' , 'vit' )
if "cls_token" in name:
__lowerCamelCase = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
__lowerCamelCase = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
__lowerCamelCase = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
__lowerCamelCase = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
__lowerCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__lowerCamelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
__lowerCamelCase = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
__lowerCamelCase = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
__lowerCamelCase = name.replace('vit.norm' , 'vit.layernorm' )
return name
def lowerCamelCase_ ( UpperCamelCase__ : dict , UpperCamelCase__ : YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
__lowerCamelCase = key.split('.' )
__lowerCamelCase = int(key_split[2] )
__lowerCamelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[
dim : dim * 2, :
]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[dim : dim * 2]
__lowerCamelCase = val[-dim:]
else:
__lowerCamelCase = val
return orig_state_dict
def lowerCamelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ) -> Dict:
"""simple docstring"""
__lowerCamelCase = get_yolos_config(UpperCamelCase__ )
# load original state_dict
__lowerCamelCase = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
# load 🤗 model
__lowerCamelCase = YolosForObjectDetection(UpperCamelCase__ )
model.eval()
__lowerCamelCase = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
__lowerCamelCase = 800 if yolos_name != 'yolos_ti' else 512
__lowerCamelCase = YolosImageProcessor(format='coco_detection' , size=UpperCamelCase__ )
__lowerCamelCase = image_processor(images=prepare_img() , return_tensors='pt' )
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.logits, outputs.pred_boxes
__lowerCamelCase = None, None
if yolos_name == "yolos_ti":
__lowerCamelCase = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
__lowerCamelCase = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
__lowerCamelCase = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
__lowerCamelCase = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
__lowerCamelCase = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
__lowerCamelCase = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
__lowerCamelCase = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
__lowerCamelCase = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
__lowerCamelCase = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
__lowerCamelCase = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
__lowerCamelCase = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
__lowerCamelCase = model_mapping[yolos_name]
image_processor.push_to_hub(UpperCamelCase__ , organization='hustvl' )
model.push_to_hub(UpperCamelCase__ , organization='hustvl' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
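# Hedged invocation sketch (script filename assumed, paths are placeholders):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small --push_to_hub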
| 707 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ['''image''']
snake_case_ = ['''image''']
snake_case_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
snake_case_ = False
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return 8
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCamelCase = CLIPVisionModel(lowerCamelCase__ )
return model
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_resize=lowerCamelCase__ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCamelCase = PriorTransformer(**lowerCamelCase__ )
return model
@property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_image_encoder
__lowerCamelCase = self.dummy_image_processor
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=lowerCamelCase__ , clip_sample=lowerCamelCase__ , clip_sample_range=1.0 , )
__lowerCamelCase = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__lowerCamelCase = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = torch_device == 'cpu'
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
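# Hedged note: the integration test above compares a 20-frame 64x64 render
# against a stored reference via assert_mean_pixel_difference, which (per the
# diffusers test utilities) tolerates small mean per-pixel deviations rather
# than requiring exact equality.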
| 167 | 0 |
'''simple docstring'''
import operator as op
__SCREAMING_SNAKE_CASE : int = """scaler.pt"""
__SCREAMING_SNAKE_CASE : Any = """pytorch_model"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = """random_states"""
__SCREAMING_SNAKE_CASE : List[str] = """optimizer"""
__SCREAMING_SNAKE_CASE : List[str] = """scheduler"""
__SCREAMING_SNAKE_CASE : int = """pytorch_model.bin"""
__SCREAMING_SNAKE_CASE : int = """pytorch_model.bin.index.json"""
__SCREAMING_SNAKE_CASE : Tuple = """model.safetensors"""
__SCREAMING_SNAKE_CASE : Dict = """model.safetensors.index.json"""
__SCREAMING_SNAKE_CASE : Any = """1.10.2"""
__SCREAMING_SNAKE_CASE : Tuple = """py38"""
__SCREAMING_SNAKE_CASE : str = """4.17.0"""
__SCREAMING_SNAKE_CASE : Dict = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
__SCREAMING_SNAKE_CASE : Any = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
__SCREAMING_SNAKE_CASE : Optional[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
__SCREAMING_SNAKE_CASE : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
__SCREAMING_SNAKE_CASE : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
__SCREAMING_SNAKE_CASE : str = """2.0.1"""
__SCREAMING_SNAKE_CASE : int = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
__SCREAMING_SNAKE_CASE : List[Any] = ["""default""", """reduce-overhead""", """max-autotune"""]
__SCREAMING_SNAKE_CASE : str = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__SCREAMING_SNAKE_CASE : str = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
__SCREAMING_SNAKE_CASE : int = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
__SCREAMING_SNAKE_CASE : List[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
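# A hedged sketch of how the operator map above is typically consumed: turning
# a comparison string like ">=" into an actual version check. The helper name
# `compare_versions` mirrors upstream accelerate conventions but is an
# assumption here, not part of the constants file itself.
from packaging import version


def compare_versions(current: str, operation: str, reference: str) -> bool:
    # Look the operator up by its symbol (">", ">=", "==", "!=", "<=", "<")
    # and apply it to the parsed versions.
    return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(reference))


# e.g. compare_versions("2.1.0", ">=", FSDP_PYTORCH_VERSION) -> True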
| 244 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ["""GLPNFeatureExtractor"""]
__SCREAMING_SNAKE_CASE : str = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
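# Hedged usage note: with the _LazyModule indirection above, importing GLPN
# symbols stays cheap; the heavy torch-backed submodule is only imported on
# first attribute access, e.g.:
#
#   from transformers import GLPNConfig, GLPNForDepthEstimation  # resolved lazily
#   model = GLPNForDepthEstimation(GLPNConfig())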
| 244 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` together with every left- and right-truncation of it."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers above 3 digits, both 3-digit ends must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` primes that remain prime when truncated from both sides."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Return the sum of the eleven two-sided truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
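# Hedged sanity checks: Project Euler 37 states there are exactly eleven
# two-sided truncatable primes, and their sum is the well-known answer 748317.
if __name__ == "__main__":
    assert compute_truncated_primes(3) == [23, 37, 53]
    assert solution() == 748317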
| 701 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 373 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n    Output class for the scheduler\'s step function output.\n\n    Args:\n        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n            denoising loop.\n        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n            `pred_original_sample` can be used to preview progress or for guidance.\n    \"""\n\n    prev_sample: torch.FloatTensor\n    pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"""schedulers/""" ) )
UpperCAmelCase__ : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(A ,"""src/diffusers/schedulers/scheduling_ddpm.py""" ) ,os.path.join(self.diffusers_dir ,"""schedulers/scheduling_ddpm.py""" ) ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def __lowercase ( self : List[str] ,A : Dict ,A : List[Any] ,A : int ,A : Tuple=None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
UpperCAmelCase__ : str = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
UpperCAmelCase__ : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
UpperCAmelCase__ : List[str] = black.format_str(A ,mode=A )
UpperCAmelCase__ : List[str] = os.path.join(self.diffusers_dir ,"""new_code.py""" )
with open(A ,"""w""" ,newline="""\n""" ) as f:
f.write(A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=A )
with open(A ,"""r""" ) as f:
self.assertTrue(f.read() ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(A ,A )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,REFERENCE_CODE + """\n""" ,)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,A ,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,re.sub("""DDPM""" ,"""Test""" ,A ) ,)
# Copy consistency with a really long name
UpperCAmelCase__ : Optional[Any] = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" ,f"{long_class_name}SchedulerOutput" ,re.sub("""Bert""" ,A ,A ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,A ,overwrite_result=re.sub("""DDPM""" ,"""Test""" ,A ) ,)
| 65 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoBERTa model."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
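# A hedged end-to-end sketch of feeding the ONNX config above into the legacy
# `transformers.onnx` export entry point; the exact export() signature varies
# across transformers versions, so treat the keywords below as assumptions.
#
#   from pathlib import Path
#   from transformers import AutoModel, AutoTokenizer
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("roberta-base")
#   model = AutoModel.from_pretrained("roberta-base")
#   onnx_config = RobertaOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, opset=13, output=Path("roberta.onnx"))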
| 186 | 0 |
import qiskit
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
lowercase__ : Any = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
lowercase__ : List[str] = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__ )
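# A hedged companion example (not from the source): the same measurement with a
# Hadamard gate applied first, so counts split roughly 50/50 between '0' and
# '1' instead of collapsing to a single deterministic state.
def superposition_measure(qubits: int = 1, classical_bits: int = 1):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # Hadamard: |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1_000)
    return job.result().get_counts(circuit)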
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
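# Hedged usage sketch, assuming the standard transformers API where this
# configuration class is exposed as DeformableDetrConfig:
#
#   from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   model = DeformableDetrForObjectDetection(config)
#   print(config.num_attention_heads)  # resolves to encoder_attention_heads via attribute_map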
| 81 | 1 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int ,A_ : List[str] ,A_ : Dict="[SEP]" ,A_ : List[str]="[SEP]" ,A_ : Optional[int]="[SEP]" ,A_ : str="[UNK]" ,A_ : str="[PAD]" ,A_ : Dict="[CLS]" ,A_ : Optional[Any]="[MASK]" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[Any] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,sep_token=A_ ,unk_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
A = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
A = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
A = F'[unused{i}]'
A = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
A = 12
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(A_ )
def __getstate__( self : Tuple ) -> List[str]:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Dict ,A_ : int ) -> str:
A = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
if token_ids_a is None:
return ([0] * len(A_ )) + [1]
return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
A = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ) -> str:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Union[str, Any] ) -> Tuple:
A = ''.join(A_ ).replace(A_ ,' ' ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
A = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep | 91 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 91 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 590 |
def mean_absolute_deviation(nums: list[int]) -> float:
    """Return the mean absolute deviation of the numbers in ``nums``."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
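# Hedged spot check: for [1, 2, 3, 4] the mean is 2.5, so the mean absolute
# deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 == 1.0.
if __name__ == "__main__":
    assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0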
| 590 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to ``max_perimeter``, the integer-sided
    right triangles with exactly that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= ``max_perimeter`` with the most solutions (Euler 39)."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 17 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
__lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__lowerCAmelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__lowerCAmelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
__lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
__lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] )
__lowerCAmelCase = config.vision_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__lowerCAmelCase = val.squeeze_()
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]:
__lowerCAmelCase = GroupViTConfig()
__lowerCAmelCase = GroupViTModel(lowercase ).eval()
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
__lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
__lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
__lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print("""Successfully saved processor and model to""" , lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
_A : List[str] = MobileBertTokenizer
_A : Optional[int] = MobileBertTokenizerFast
_A : List[str] = True
_A : str = True
_A : int = filter_non_english
_A : List[str] = '''google/mobilebert-uncased'''
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : Tuple = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = """UNwant\u00E9d,running"""
__SCREAMING_SNAKE_CASE : Optional[int] = """unwanted, running"""
return input_text, output_text
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCAmelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = """UNwant\u00E9d,running"""
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# With lower casing
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer(do_lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = """UNwant\u00E9d,running"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : str = tokenizer.encode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__SCREAMING_SNAKE_CASE : Optional[int] = {}
for i, token in enumerate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = i
__SCREAMING_SNAKE_CASE : Tuple = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode_plus(
lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase__ , """do_lower_case""" ) else False
__SCREAMING_SNAKE_CASE : Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_common_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##"
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
"""Two's complement of a negative integer, returned as a binary string."""


def twos_complement(number: int) -> str:
    """
    Take in a negative integer `number` and return its two's complement
    representation prefixed with "0b".

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(-17)
    '0b101111'
    >>> twos_complement(1)
    Traceback (most recent call last):
        ...
    ValueError: input must be a negative integer
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
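# Illustration (not part of the original module): for a negative `number` stored
# in n bits, the two's complement bit pattern equals (1 << n) + number, which is
# exactly what the string construction above assembles digit by digit, e.g.:
#   bin((1 << 4) + (-5))  # '0b1011' == twos_complement(-5)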
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
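# Minimal usage sketch (the model id comes from the slow test above; requires
# sentencepiece to be installed):
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer("Det är inget fel på Mr. Cool")["input_ids"]
#   tokenizer.decode(ids)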
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
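# Example invocation (illustrative; all paths and the model name are placeholders):
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file path/to/train.txt \
#       --train_ref_file path/to/ref.txt \
#       --do_train \
#       --output_dir ./output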
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def lowerCamelCase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : List[Any] ):
lowerCamelCase_ = AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase_ = AutoModelForSeqaSeqLM.from_config(_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
AutoTokenizer.from_pretrained(_lowerCamelCase ).save_pretrained(_lowerCamelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 714 |
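# Example CLI usage (python-fire maps command-line arguments onto the function
# signature; the config name and output path below are illustrative only):
#   python save_randomly_initialized_model.py facebook/bart-base /tmp/bart-random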
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
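# Behavior sketch (illustrative): with the _LazyModule registered in sys.modules,
# importing the package is cheap and heavy backends load only on first attribute
# access, e.g.
#   from transformers.models.squeezebert import SqueezeBertConfig  # no torch import yet
#   from transformers.models.squeezebert import SqueezeBertModel   # pulls in torch lazily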
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    r"""Constructs a BLIP image processor."""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale an image by a scale factor: `rescaled = image * scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image: `normalized = (image - mean) / std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: Optional[bool] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
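# Minimal usage sketch (illustrative; requires Pillow and an image on disk):
#   from PIL import Image
#   processor = BlipImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 384, 384])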
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """
    :param ksize:  the kernel size (an even size is bumped to the next odd one)
    :param sigma:  standard deviation of the gaussian envelope
    :param theta:  orientation of the normal to the parallel stripes, in degrees
    :param lambd:  wavelength of the sinusoidal factor
    :param gamma:  spatial aspect ratio
    :param psi:    phase offset
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
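# Note (illustrative): gabor_filter_kernel(10, 8, theta, 10, 0, 0) actually returns
# an 11x11 kernel, because even sizes are bumped to the next odd size above, and
# summing the filter2D responses over the six orientations in the loop approximates
# an orientation-independent edge map.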
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a MarkupLM model."""

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
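# Usage sketch (values illustrative):
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   config.model_type                # "markuplm"
#   config.max_position_embeddings  # 512 by default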
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
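# Usage sketch (assumptions: the checkpoint id is a placeholder and the class is
# loaded as a diffusers community pipeline; __init__ converts any scheduler to DDIM):
#   pipe = DiffusionPipeline.from_pretrained(
#       "google/ddpm-ema-celebahq-256", custom_pipeline="ddim_noise_comparative_analysis"
#   )
#   images, timestep = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)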
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64),
            class_embed_type="simple_projection", projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
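# To run only the fast tests in this file (command illustrative; the path assumes
# the usual diffusers checkout layout):
#   pytest tests/pipelines/audioldm/test_audioldm.py -k "AudioLDMPipelineFastTests"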
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """
    Compute Γ(num) numerically via the Euler integral
    Γ(z) = ∫₀^∞ x^(z-1)·e^(-x) dx, which converges for z > 0.
    """
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
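# Sanity check (illustrative): for positive integers n, Γ(n) = (n-1)!, so gamma(5)
# should agree with math.factorial(4) == 24 up to quadrature error:
#   assert abs(gamma(5) - 24.0) < 1e-6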
import os
import sys
import unittest
__A : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A : List[Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a , """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
SCREAMING_SNAKE_CASE = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a , """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
SCREAMING_SNAKE_CASE = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a , """torch_and_transformers_and_onnx""" )
def _UpperCAmelCase ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , a )
self.assertIn("""torch_and_transformers""" , a )
self.assertIn("""flax_and_transformers""" , a )
self.assertIn("""torch_and_transformers_and_onnx""" , a )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""" , objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""" , objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""" , objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""" , objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""" , objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""" , objects["""torch_and_transformers_and_onnx"""] )
    def test_create_dummy_object( self ):
        dummy_constant = create_dummy_object("""CONSTANT""" , """'torch'""" )
        self.assertEqual(dummy_constant , """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""" , """'torch'""" )
        self.assertEqual(
            dummy_function , """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
SCREAMING_SNAKE_CASE = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(a , a )
def _UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""] , expected_dummy_pytorch_file )
| 450 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('''Building PyTorch model from configuration: {}'''.format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def hashimage( image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , depth_estimator , examples ):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs_hash = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
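# Minimal usage sketch (added; mirrors what the slow test above exercises).
# The pipeline returns a dict with a `predicted_depth` tensor and a PIL
# `depth` image:
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")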
| 35 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['''image''', '''text''']
    outputs = ['''text''']
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
        super().__init__(*args , **kwargs )
    def encode( self , document: "Image" , question: str ):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="pt" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
        sequence = re.sub(r"<.*?>" , "" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
return sequence["answer"]
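# Usage sketch (added): `PipelineTool` subclasses are callable with their
# declared inputs; the image path below is a placeholder.
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total?")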
| 709 |
from collections.abc import Generator
from math import sin
def to_little_endian( string_aa: bytes ) -> bytes:
    if len(string_aa ) != 32:
        raise ValueError("Input must be of length 32" )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex( i: int ) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative" )
    hex_rep = format(i, "08x" )[-8:]
    little_endian_hex = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def preprocess( message: bytes ) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b" ).encode("utf-8" )
    start_len = format(len(bit_string ), "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
    while len(bit_string ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def get_block_words( bit_string: bytes ) -> Generator[list[int], None, None]:
    if len(bit_string ) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512" )
    for pos in range(0, len(bit_string ), 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
for i in range(0, 512, 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) )
yield block_words
def not_aa( i: int ) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative" )
    i_str = format(i, "032b" )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2 )
def sum_aa( a: int, b: int ) -> int:
return (a + b) % 2**32
def left_rotate_aa( i: int, shift: int ) -> int:
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me( message: bytes ) -> bytes:
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0X6745_2301
    ba = 0XEFCD_AB89
    ca = 0X98BA_DCFE
    da = 0X1032_5476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
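# Added sanity check against the standard MD5 reference vectors (both expected
# digests below are the well-known published values), assuming the `md5_me`
# name restored above.
if __name__ == "__main__":
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
    assert md5_me(b"The quick brown fox jumps over the lazy dog") == b"9e107d9d372bb6826bd81d3542a419d6"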
| 453 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
'''simple docstring'''
def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = device
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.resize(A )
SCREAMING_SNAKE_CASE : Any = self.center_crop(A )
SCREAMING_SNAKE_CASE : str = self.normalize(A )
return images
def __call__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A )
SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
wandb.log('Original Image', wandb.Image(A ) )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
| 28 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]] , row: int , column: int ) -> bool:
    """simple docstring"""
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]] , row: int ) -> bool:
    """simple docstring"""
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard(board: list[list[int]] ) -> None:
    """simple docstring"""
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("Q" , end=" " )
            else:
                print("." , end=" " )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
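# Added note: the 8x8 configuration above prints all 92 distinct solutions, so
# the final count should be 92. A smaller self-check, using the same
# `solve`/`solution` names, is the classic 4-queens case with exactly 2 solutions:
#
#   solution.clear()
#   n4_board = [[0] * 4 for _ in range(4)]
#   solve(n4_board, 0)
#   assert len(solution) == 2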
| 147 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video( url: str ) -> bytes:
    '''simple docstring'''
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
    return requests.get(video_url ).content
if __name__ == "__main__":
lowerCAmelCase_: int = input("Enter Video/IGTV url: ").strip()
lowerCAmelCase_: Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 703 | """simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def training_function( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue" , "mrpc" )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        performance_metric[f'''epoch-{epoch}'''] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
            json.dump(performance_metric , f )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--performance_lower_bound" , type=float , default=None , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=3 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
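# How this script is typically launched (added note; the DeepSpeed config file
# name below is a placeholder, not something this file defines):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --output_dir . --num_epochs 3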
| 668 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector: ndarray ) -> float:
    '''simple docstring'''
    return np.dot(vector , vector )
class A :
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ) -> None:
        '''simple docstring'''
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma" )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError("gamma must be float or int" )
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0" )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg )
    def __linear( self , vectora: ndarray , vectorb: ndarray ) -> float:
        '''simple docstring'''
        return np.dot(vectora , vectorb )
    def __rbf( self , vectora: ndarray , vectorb: ndarray ) -> float:
        '''simple docstring'''
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb ) ) )
    def fit( self , observations: list[ndarray] , classes: ndarray ) -> None:
        '''simple docstring'''
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes )
        def to_minimize(candidate: ndarray ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation: ndarray ) -> int:
        '''simple docstring'''
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
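# Toy usage sketch (an added illustration, assuming the `A` class with the
# `fit`/`predict` methods restored above): two separable 1-D points.
if __name__ == "__main__":
    xs = [np.asarray([1.0]), np.asarray([-1.0])]
    ys = np.asarray([1, -1])
    model = A(kernel="linear")
    model.fit(xs, ys)
    print(model.predict(np.asarray([2.0])))  # expected: 1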
| 54 |
"""simple docstring"""
import math
def insertion_sort(array : list , start : int = 0 , end : int = 0 ) ->list:
    '''simple docstring'''
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array : list , index : int , heap_size : int ) ->None: # Max Heap
    '''simple docstring'''
    largest = index
    left_index = 2 * index + 1 # Left Node
    right_index = 2 * index + 2 # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort(array : list ) ->list:
    '''simple docstring'''
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[0], array[i] = array[i], array[0]
        heapify(array , 0 , i )
return array
def median_of_a(array : list , first_index : int , middle_index : int , last_index : int ) ->int:
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def partition(array : list , low : int , high : int , pivot : int ) ->int:
    '''simple docstring'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array : list ) ->list:
    '''simple docstring'''
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort(array : list , start : int , end : int , size_threshold : int , max_depth : int ) ->list:
    '''simple docstring'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_a(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Optional[int] = input('''Enter numbers separated by a comma : ''').strip()
a : List[str] = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
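# Added quick check (uses the `sort` name restored above): introsort starts
# with quicksort, falls back to heap sort once the depth budget
# (2 * ceil(log2(n))) is spent, and finishes slices of <= 16 elements with
# insertion sort.
#
#   assert sort([4.0, 1.0, 3.0, 9.0, 7.0]) == [1.0, 3.0, 4.0, 7.0, 9.0]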
| 633 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """simple docstring"""
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
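# Example invocation (added; the script name and checkpoint paths are
# placeholders, not declared anywhere in this file):
#
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-base-cased \
#       --finetuning_task sts-b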
| 700 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SchedulerType(Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule( optimizer , last_epoch = -1 ):
    """simple docstring"""
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer , num_warmup_steps , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer , step_rules , last_epoch = -1 ):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        steps_str , value_str = rule_str.split(":" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """simple docstring"""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    """simple docstring"""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
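# Usage sketch (added): wire a linear warmup/decay schedule onto an optimizer.
# The tiny model below is only a placeholder for the demo.
if __name__ == "__main__":
    from torch import nn
    from torch.optim import AdamW

    demo_model = nn.Linear(10, 2)
    demo_optimizer = AdamW(demo_model.parameters(), lr=5e-5)
    demo_scheduler = get_scheduler(
        SchedulerType.LINEAR, demo_optimizer, num_warmup_steps=100, num_training_steps=1000
    )
    demo_scheduler.step()  # call once per optimizer step during training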
| 640 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ : List[str] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ : Optional[Any] = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]="eval" ) -> int:
"""simple docstring"""
UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , f"{split}_results.json" )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE_ )
raise ValueError(f"can't find {path}" )
a__ : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __snake_case ( self : str ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_flax_glue.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_clm_flax.main()
UpperCAmelCase = get_results(a__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_summarization_flax.main()
UpperCAmelCase = get_results(a__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_mlm_flax.main()
UpperCAmelCase = get_results(a__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __snake_case ( self : str ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_ta_mlm_flax.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __snake_case ( self : Tuple ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_flax_ner.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __snake_case ( self : str ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_qa.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 51 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
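# Added note on the lazy-import pattern above: nothing from
# `modeling_informer` is imported until an attribute is first accessed, so
#
#   from transformers.models.informer import InformerModel
#
# only pulls in the torch-dependent module at that point, keeping the base
# `import transformers` cheap when torch is absent.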
| 109 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Dict = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 525 | # Function to print upper half of diamond (pyramid)
def floyd(n ):
    """simple docstring"""
    for i in range(0 , n ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def reverse_floyd(n ):
    """simple docstring"""
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def pretty_print(n ):
    """simple docstring"""
    if n <= 0:
        print(" ... .... nothing printing :(" )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
SCREAMING_SNAKE_CASE : Tuple = 1
while K:
SCREAMING_SNAKE_CASE : Tuple = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
SCREAMING_SNAKE_CASE : Optional[int] = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 525 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
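# Hedged illustration (added; the pad_token_id value 1 is just an example): the
# default attention mask built by prepare_led_inputs_dict marks pad positions
# with 0 and real tokens with 1.
def _example_default_attention_mask():
    input_ids = tf.constant([[5, 6, 1, 1]])  # assume pad_token_id == 1
    return tf.cast(tf.math.not_equal(input_ids, 1), tf.int8)  # -> [[1, 1, 0, 0]]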
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """Convert an original latent-diffusion checkpoint into a diffusers LDMPipeline."""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae=vqvae, unet=unet, scheduler=noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
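# Example invocation (added for illustration; script and file names below are
# placeholders, not taken from the original repository):
#   python convert_original_ldm_to_diffusers.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline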
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        """Compute CUAD scores for `predictions` against `references`."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings keys are skipped because they are not counted in the original checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
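# Example invocation (added for illustration; script and file names below are
# placeholders, not taken from the original repository):
#   python convert_flava_original_checkpoint_to_hf.py \
#       --checkpoint_path flava.pt --codebook_path dalle_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf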
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
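# Example invocation (added for illustration; paths and flag values are
# hypothetical, with the generic flags assumed to come from
# lightning_base.add_generic_args):
#   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./ner-out --task_type NER --max_seq_length 128 --do_train --do_predict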
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `iteration_step` to the vector list `steps` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each edge by four edges forming the Koch snowflake bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the vector list as a connected line with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
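# Added sanity check (illustrative, see rotate above): rotating the unit
# x-vector by 90 degrees should give the unit y-vector, up to floating-point
# error.
def _check_rotate():
    rotated = rotate(numpy.array([1.0, 0.0]), 90)
    assert numpy.allclose(rotated, numpy.array([0.0, 1.0]))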
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the ReLU function element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
"""simple docstring"""
def binomial_coefficient(n, r):
    """Compute C(n, r) by building Pascal's triangle rows iteratively."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
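# Added cross-check (illustrative): the row-by-row Pascal's triangle update in
# binomial_coefficient should agree with math.comb from the standard library.
def _check_binomial_coefficient():
    import math

    assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252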
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
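# Hedged usage sketch (added; the shapes and the 128-dim time embedding are
# arbitrary choices, not from the original module): initialize and apply the
# resnet block above on dummy NHWC data.
def _example_resnet_block():
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
    hidden_states = jnp.ones((1, 8, 8, 32))  # NHWC, matching the blocks above
    temb = jnp.ones((1, 128))  # time embedding; projected to out_channels by nn.Dense
    params = block.init(jax.random.PRNGKey(0), hidden_states, temb)
    return block.apply(params, hidden_states, temb)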
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two equal-sized power-of-two matrices with Strassen's 7 products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
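# Added sanity check (illustrative): for the 2x2 base case, actual_strassen
# falls through to default_matrix_multiplication, so the result must match the
# hand-computed product.
def _check_actual_strassen():
    assert actual_strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]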
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCAmelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
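# Hedged usage sketch (added for illustration; the class name is kept as it
# appears in the source file above): preprocess one random HWC uint8 image and
# inspect the returned pixel_values shape, (1, 3, 224, 224) with the defaults.
def _example_preprocess():
    image_processor = _UpperCAmelCase()
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = image_processor(images=image, return_tensors="np")
    return batch["pixel_values"].shape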
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
@require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True, )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
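

# ---------------------------------------------------------------------------
# Standalone sketch (illustrative, not part of the test suite) of the offload
# pattern the tests above exercise, assuming only the public accelerate.hooks
# API and a toy torch model:
#
#     import torch
#     from torch import nn
#     from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules
#
#     toy = nn.Sequential(nn.Linear(3, 4), nn.BatchNorm1d(4), nn.Linear(4, 5))
#     attach_align_device_hook(toy, execution_device="cpu", offload=True)
#     out = toy(torch.randn(2, 3))        # weights are paged in per submodule
#     remove_hook_from_submodules(toy)    # restores the real weights on CPU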
| 183 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, ) | 86 |
import re
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> str:
if len(re.findall('''[ATCG]''' , UpperCamelCase_ ) ) != len(UpperCamelCase_ ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
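
    # Usage sketch (illustrative): each base maps to its complement.
    assert dna("ATCG") == "TAGC"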
| 632 | 0 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (P = S * pf) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (Q = S * sqrt(1 - pf**2))."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
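
    # Worked example (illustrative): 100 VA at power factor 0.8 gives
    # P = 100 * 0.8 = 80 W and Q = 100 * sqrt(1 - 0.8**2) = 60 var.
    print(real_power(100, 0.8))      # 80.0
    print(reactive_power(100, 0.8))  # ~60.0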
| 624 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
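

# Direct scipy equivalent of the metric above (illustrative, bypassing the
# datasets.Metric wrapper); `spearmanr` is already imported at the top:
rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))  # -0.7, matching the docstring example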
| 624 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
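

# Standalone sketch (illustrative, numpy only) of the zero-mean/unit-variance
# property the checks above assert: the extractor normalizes each un-padded
# input to roughly mean 0 and variance 1.
raw = np.random.rand(1_000).astype(np.float32)
normalized = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)
assert abs(float(normalized.mean())) < 1e-3 and abs(float(normalized.var()) - 1) < 1e-3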
| 427 | '''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
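

# Usage sketch: the public entry point that wraps this reader is
# `datasets.load_dataset`; "corpus.txt" below is a placeholder path.
#
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
#     print(ds[0]["text"])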
| 427 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''')
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
        '''--na-prob-thresh''' , '''-t''' , type=float , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
        '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=None , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa['''answers''']['''text'''])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['''id''']
                gold_answers = [t for t in qa['''answers''']['''text'''] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['''''']
                if qid not in preds:
                    print(F"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores.values()) / total),
                ('''f1''', 100.0 * sum(f1_scores.values()) / total),
                ('''total''', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('''f1''', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('''total''', total),
            ])


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[F"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='''b''', alpha=0.2, where='''post''')
    plt.fill_between(recalls, precisions, step='''post''', alpha=0.2, color='''b''')
    plt.xlabel('''Recall''')
    plt.ylabel('''Precision''')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_exact.png'''), title='''Precision-Recall curve for Exact Match score''', )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_f1.png'''), title='''Precision-Recall curve for F1 score''', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_oracle.png'''), title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''', )
    merge_eval(main_eval, pr_exact, '''pr_exact''')
    merge_eval(main_eval, pr_f1, '''pr_f1''')
    merge_eval(main_eval, pr_oracle, '''pr_oracle''')


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('''Model probability of no-answer''')
    plt.ylabel('''Proportion of dataset''')
    plt.title(F"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(image_dir, F"""na_prob_hist_{name}.png"""))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['''best_exact'''] = best_exact
    main_eval['''best_exact_thresh'''] = exact_thresh
    main_eval['''best_f1'''] = best_f1
    main_eval['''best_f1_thresh'''] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['''data''']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, '''HasAns''')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, '''NoAns''')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, '''hasAns''')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, '''noAns''')
    if OPTS.out_file:
        with open(OPTS.out_file, '''w''') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
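
    # Illustrative doctest-style check of the normalization-aware metrics above
    # (not part of the official script):
    #
    #     >>> compute_exact("The Cat sat.", "cat sat")
    #     1
    #     >>> compute_f1("new york city", "york city")
    #     0.8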
| 710 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('''env''')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''') is not None:
            import safetensors

            safetensors_version = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""

        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config , dict)
                else F"""\t{accelerate_config}"""
            )

        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU'''))

        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F"""{safetensors_version}""",
'''Accelerate version''': F"""{accelerate_version}""",
'''Accelerate config''': F"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
'''Jax version''': F"""{jax_version}""",
'''JaxLib version''': F"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
        print(self.format_dict(info))
return info
@staticmethod
    def format_dict(d):
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 613 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
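
    # Note (illustrative): with the lazy structure above, `import transformers`
    # stays cheap; torch/TF/Flax are only pulled in when one of the classes
    # registered in _import_structure is first accessed, e.g.:
    #
    #     from transformers import ResNetConfig  # no torch import triggered yet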
| 651 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 408 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
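
    # Illustrative call with a placeholder webhook URL (not a real endpoint):
    # send_slack_message("Deploy finished", "https://hooks.slack.com/services/T000/B000/XXXX")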
| 711 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
# TODO Update this
lowercase_ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, """use_esm_attn_map""", False):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
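

# Illustrative round trip (uses the classes defined above): nested sub-configs
# serialize to plain dicts via to_dict() and can be rebuilt from them.
_cfg = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config=EsmFoldConfig())
assert isinstance(_cfg.to_dict()["esmfold_config"], dict)
assert isinstance(TrunkConfig(**_cfg.esmfold_config.to_dict()["trunk"]), TrunkConfig)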
| 456 | 0 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
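

# --- Minimal parsing sketch (added for illustration; the HTML below is a
# made-up stand-in, not IMDb's real markup).
if __name__ == "__main__":
    html = (
        "<table><tr>"
        "<td class='titleColumn'><a>The Example</a></td>"
        "<td class='ratingColumn imdbRating'><strong>9.2</strong></td>"
        "</tr></table>"
    )
    demo_soup = BeautifulSoup(html , '''html.parser''' )
    titles = demo_soup.find_all('''td''' , attrs='''titleColumn''' )  # a bare string matches the CSS class
    ratings = demo_soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    print({t.a.text: float(r.strong.text ) for t, r in zip(titles , ratings )} )  # {'The Example': 9.2}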
| 51 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
    def __init__( self):
        '''simple docstring'''
        self.events = []
    def on_init_end( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_init_end""")
    def on_train_begin( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_train_begin""")
    def on_train_end( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_train_end""")
    def on_epoch_begin( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_epoch_begin""")
    def on_epoch_end( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_epoch_end""")
    def on_step_begin( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_step_begin""")
    def on_step_end( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_step_end""")
    def on_evaluate( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_evaluate""")
    def on_predict( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_predict""")
    def on_save( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_save""")
    def on_log( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_log""")
    def on_prediction_step( self , args , state , control , **kwargs):
        '''simple docstring'''
        self.events.append("""on_prediction_step""")
@require_torch
class TrainerCallbackTest ( unittest.TestCase ):
    def setUp( self):
        '''simple docstring'''
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self):
        '''simple docstring'''
        shutil.rmtree(self.output_dir)
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs):
        '''simple docstring'''
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a , b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs)
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2):
        '''simple docstring'''
        self.assertEqual(len(cbs1) , len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1 , cbs2):
            if isinstance(cb1 , type) and isinstance(cb2 , type):
                self.assertEqual(cb1 , cb2)
            elif isinstance(cb1 , type) and not isinstance(cb2 , type):
                self.assertEqual(cb1 , cb2.__class__)
            elif not isinstance(cb1 , type) and isinstance(cb2 , type):
                self.assertEqual(cb1.__class__ , cb2)
            else:
                self.assertEqual(cb1 , cb2)
    def get_expected_events( self , trainer):
        '''simple docstring'''
        expected_events = ["""on_init_end""", """on_train_begin"""]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("""on_epoch_begin""")
            for _ in range(train_dl_len):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""")
expected_events.append("""on_epoch_end""")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
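
    # --- Worked example (added): for a hypothetical run of 1 epoch, 2 steps,
    # logging_steps=1, no evaluation and no saving, get_expected_events would
    # return:
    #   ["on_init_end", "on_train_begin",
    #    "on_epoch_begin",
    #    "on_step_begin", "on_step_end", "on_log",
    #    "on_step_begin", "on_step_end", "on_log",
    #    "on_epoch_end",
    #    "on_log", "on_train_end"]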
    def test_init_callback( self):
        '''simple docstring'''
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
    def test_add_remove_callback( self):
        '''simple docstring'''
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__ , DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0 , DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1 , cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0 , cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks)
    def test_event_flow( self):
        '''simple docstring'''
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="""ignore""" , category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("""transformers.trainer_callback.logger.warning""") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 12 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ) -> Dict:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """neck_hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """num_attention_heads""" ) )
class MobileViTModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , last_hidden_size=6_40 , num_attention_heads=4 , hidden_act="silu" , conv_kernel_size=3 , output_stride=32 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ) -> List[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        return MobileViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ) -> Union[str, Any]:
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ) -> int:
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileViTModel,
            '''image-classification''': MobileViTForImageClassification,
            '''image-segmentation''': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> int:
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )
    def test_config( self ) -> List[str]:
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def lowerCAmelCase_ ( self : List[str] ) -> List[Any]:
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def lowerCAmelCase_ ( self : int ) -> Optional[Any]:
pass
    def test_forward_signature( self ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase_ ( self : Tuple ) -> Optional[Any]:
pass
    def test_model( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ) -> Union[str, Any]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> List[Any]:
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ) -> Tuple:
        return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ) -> Optional[Any]:
        model = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_semantic_segmentation( self ) -> str:
        model = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
                [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
                [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_post_processing_semantic_segmentation( self ) -> Optional[Any]:
        model = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
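

# --- Worked shapes (added): with the hypothetical tester numbers above
# (image_size=32, output_stride=32), the five hidden states shrink by a factor
# of two per stage, and the final divisor satisfies output_stride == divisor // 2.
if __name__ == "__main__":
    image_size, divisor = 32, 2
    sizes = []
    for _ in range(5):
        sizes.append(image_size // divisor)
        divisor *= 2
    print(sizes)         # [16, 8, 4, 2, 1]
    print(divisor // 2)  # 32 == output_stride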
| 703 |
"""simple docstring"""
def method_2( boundary , steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
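
# --- Worked example (added): integrating f(x) = x**2 on [0, 1] with 10 steps
# gives h = 0.1 and interior points ~0.1 .. 0.9, so
#   y ~= 0.05 * f(0) + 0.1 * (0.01 + 0.04 + ... + 0.81) + 0.05 * f(1) = 0.335,
# close to the exact value 1/3 ~= 0.3333 (the error shrinks as steps grows).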
def make_points( a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f( x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary , steps )
    print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 173 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , mask_ratio=0.9 , scope=None , ) -> Optional[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        """simple docstring"""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[Any]:
        """simple docstring"""
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Any:
        """simple docstring"""
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ) -> Dict:
        """simple docstring"""
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> Optional[Any]:
        """simple docstring"""
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Optional[Any]:
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict["""bool_masked_pos"""] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def test_config( self ) -> Tuple:
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
    def test_inputs_embeds( self ) -> Tuple:
        """simple docstring"""
        pass
    def test_model_common_attributes( self ) -> Optional[Any]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> Optional[int]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ) -> int:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> List[Any]:
        """simple docstring"""
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ) -> Any:
        """simple docstring"""
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ) -> str:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ) -> Tuple:
        """simple docstring"""
        pass
def prepare_video( ) -> Dict:
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ) -> Any:
        """simple docstring"""
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification( self ) -> Any:
        """simple docstring"""
        model = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_for_pretraining( self ) -> List[Any]:
        """simple docstring"""
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        inputs["""bool_masked_pos"""] = torch.load(local_path )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor(
            [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=torch_device )
        self.assertEqual(outputs.logits.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5_1_4_2] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
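

# --- Worked token counts (added): with the hypothetical tester numbers above
# (image_size=10, patch_size=2, num_frames=2, tubelet_size=2, mask_ratio=0.9):
#   num_patches_per_frame = (10 // 2) ** 2 = 25
#   seq_length = (2 // 2) * 25 = 25
#   num_masks = int(0.9 * 25) = 22, leaving 3 visible patches per video.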
| 332 |
def sum_digits( num : int ) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution( max_n : int = 100 ) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
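
# --- Worked check (added): the numerators of the convergents of e run
# 2, 3, 8, 11, 19, 87, 106, 193, 1264, 1457, ... so solution(10) returns
# sum_digits(1457) = 1 + 4 + 5 + 7 = 17 (Project Euler 65 asks for max_n=100).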
if __name__ == "__main__":
print(F"""{solution() = }""")
| 332 | 1 |
from PIL import Image
def change_contrast( img : Image , level : int ):
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c : int ) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
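
# --- Worked numbers (added): for level = 170,
#   factor = 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ~= 4.85,
# so contrast(128) stays at 128 while values away from mid-gray are pushed
# apart; Image.point keeps the mapped values within the valid 0..255 byte range.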
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
| 97 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size( input_image : np.ndarray , output_size : Union[int, Iterable[int]] , keep_aspect_ratio : bool , multiple : int ):
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
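
# --- Worked example (added): for a hypothetical 480x640 input resized toward
# 384x384 with keep_aspect_ratio=True and multiple=1:
#   scale_height = 384 / 480 = 0.8, scale_width = 384 / 640 = 0.6
#   |1 - 0.6| > |1 - 0.8|, so the height scale (the smaller change) is used
#   for both sides -> get_resize_output_image_size(...) == (384, 512)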
class DPTImageProcessor ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , keep_aspect_ratio = False , ensure_multiple_of = 1 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(
            image , output_size=(size['height'], size['width']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 97 | 1 |
"""simple docstring"""
def pancake_sort( arr ):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
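
# --- Worked trace (added): pancake_sort([3, 1, 2])
#   cur=3: max 3 sits at index 0 -> flip(0) keeps [3, 1, 2], flip(2) gives [2, 1, 3]
#   cur=2: max of [2, 1] sits at index 0 -> flip(0) keeps [2, 1, 3], flip(1) gives [1, 2, 3]
#   result: [1, 2, 3]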
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 29 |
def solution( n : int = 4_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
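
# --- Worked check (added): for n = 100 the even Fibonacci numbers are
# 2, 8 and 34, so solution(100) == 44.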
if __name__ == "__main__":
print(F'''{solution() = }''')
| 176 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ) -> Union[str, Any]:
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F'Loading PyTorch weights from {pt_path}' )
        pt_state_dict = torch.load(pt_path , map_location="cpu" )
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ) -> (Tuple[str], np.ndarray):
    """simple docstring"""
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
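
# --- Worked renames (added, illustrative keys and shapes only):
#   ("dense", "weight") with a 2-D tensor  -> ("dense", "kernel"), tensor transposed
#   ("conv", "weight") with a 4-D tensor   -> ("conv", "kernel"), transposed (2, 3, 1, 0)
#   ("layer_norm", "weight")               -> ("layer_norm", "scale") when that key
#                                             exists in the random Flax params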
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ) -> Union[str, Any]:
    """simple docstring"""
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("." ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the shards one by one and merge them into a single Flax state dict
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load the PyTorch shard and convert every tensor to numpy
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
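

# Illustrative usage sketch (the shard file names below are hypothetical): merging a
# sharded PyTorch checkpoint into a single Flax parameter tree.
#
#   shard_filenames = ["pytorch_model-00001-of-00002.bin", "pytorch_model-00002-of-00002.bin"]
#   flax_params = convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model)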
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
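

# Illustrative usage sketch (the checkpoint path is hypothetical): deserializing a
# Flax msgpack checkpoint directly into an already-instantiated PyTorch model.
#
#   from transformers import BertConfig, BertModel
#
#   pt_model = BertModel(BertConfig())
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")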
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model | 182 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively scan the list from both ends for `key`; return its index or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 182 | 1 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Score the prediction lines in `pred_path` against the target lines in `tgt_path`."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
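

# Illustrative usage sketch (file names are hypothetical): the function can also be
# called directly instead of through the fire CLI below.
#
#   metrics = calculate_rouge_path("preds.txt", "targets.txt", save_path="rouge.json")
#   print(metrics)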
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 336 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 336 | 1 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    """Return (number of newline-separated documents containing `term`, total documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), or the smoothed variant 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    """Combine term frequency and inverse document frequency into one score."""
    return round(tf * idf, 3)
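

# Illustrative worked example (the corpus below is made up) exercising the four
# helpers end to end: "cat" appears in 2 of 3 documents, so idf = round(log10(3 / 2), 3).
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog sat\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")  # 1
    df, n = document_frequency("cat", corpus)  # (2, 3)
    idf = inverse_document_frequency(df, n)  # 0.176
    print(tf_idf(tf, idf))  # 0.176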
| 702 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice's string representation back to the actual choice value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
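

# Illustrative usage sketch (field names are made up): `HfArg` is meant to be the
# default value of dataclass fields, stashing CLI metadata for the parser below.
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = HfArg(default=5e-5, aliases=["--lr"], help="Initial learning rate.")
#       output_dir: str = HfArg(default="out", help="Where to write checkpoints.")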
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
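

# Illustrative usage sketch (the dataclass is made up): parsing CLI arguments
# straight into dataclass instances.
#
#   @dataclasses.dataclass
#   class Args:
#       model_name: str = "bert-base-uncased"
#       batch_size: int = 8
#
#   parser = HfArgumentParser(Args)
#   (args,) = parser.parse_args_into_dataclasses()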
| 501 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 597 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
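

# Illustrative renames produced by rename_key() above (traced through the rules):
#
#   rename_key("ln_final.weight")      -> "text_model.final_layer_norm.weight"
#   rename_key("visual.conv1.weight")  -> "vision_model.embeddings.patch_embedding.weight"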
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(filepath)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 597 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 458 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCamelCase ( A ):
'''simple docstring'''
a_ : List[Any] = ["image_processor", "tokenizer"]
a_ : Tuple = "AutoImageProcessor"
a_ : Tuple = "AutoTokenizer"
def __init__( self : Union[str, Any] , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowerCamelCase , )
__lowerCamelCase : List[Any] = kwargs.pop("""feature_extractor""" )
__lowerCamelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : List[Any] = self.image_processor
__lowerCamelCase : Any = False
def __call__( self : Union[str, Any] , *_lowerCamelCase : str , **_lowerCamelCase : Dict ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
__lowerCamelCase : List[str] = kwargs.pop("""images""" , _lowerCamelCase )
__lowerCamelCase : List[str] = kwargs.pop("""text""" , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__lowerCamelCase : Tuple = args[0]
__lowerCamelCase : List[Any] = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
__lowerCamelCase : Tuple = self.image_processor(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if text is not None:
__lowerCamelCase : int = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase : Optional[int] = encodings["""input_ids"""]
return inputs
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
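

# --- Illustrative usage sketch (not part of the original module). `token2json`
# turns a Donut-style generated sequence into nested JSON. The checkpoint name
# below is an example; substitute your own fine-tuned processor.
if __name__ == "__main__":
    from transformers import DonutProcessor  # assumed public entry point for this class

    processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
    sequence = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
    print(processor.token2json(sequence))
    # expected: {'menu': {'name': 'Latte', 'price': '4.50'}}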
| 458 | 1 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in a given line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks that all start at the given `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function so that sorting ignores casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort (all uppercased first, camel-cased second, lower-cased last)."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` with the objects inside `[ ]` sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
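

# Illustrative helper (not in the original script): a quick look at the
# ordering `sort_objects` produces. Constants come first, then classes, then
# functions, each alphabetically with underscores ignored.
def _demo_sort_objects():
    sample = ["zeta_fn", "BetaClass", "ALPHA_CONST", "gamma_fn"]
    # returns ["ALPHA_CONST", "BetaClass", "gamma_fn", "zeta_fn"]
    return sort_objects(sample)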
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under `PATH_TO_DIFFUSERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
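

# --- Minimal sketch (not part of the original script) of the tf.data pattern
# used inside `get_tfds`: a generator yielding (features_dict, label) pairs is
# wrapped into a typed tf.data.Dataset. Names below are illustrative only.
def _toy_dataset_example():
    import tensorflow as tf  # local import so the sketch stays self-contained

    def gen():
        for i in range(4):
            yield ({"input_ids": [i, i + 1, i + 2]}, i % 2)

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )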
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor in a (possibly nested) tree of containers."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index into a multi-dimensional index for `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
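

# Illustrative check (not in the original module): _flat_idx_to_idx inverts
# row-major flattening, so for dims (2, 3) flat index 4 maps to (1, 1),
# since 1 * 3 + 1 == 4.
def _flat_idx_round_trip_example() -> None:
    assert _flat_idx_to_idx(4, (2, 3)) == (1, 1)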
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return the smallest set of slices covering the (inclusive) range from `start` to `end`."""

    # `start_edges`/`end_edges` indicate whether, from a given dimension on,
    # the start/end index sits at the top/bottom edge of the tensor.
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end], without the reshape."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Implements the "chunking" procedure: run `layer` over `inputs` in slices of size `chunk_size` along the flattened batch dimensions and stitch the results back together."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent
    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
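

# --- Illustrative usage (not part of the original module): chunk a toy layer
# over the batch dimension. The layer is applied to two chunks of size 2 and
# the results are stitched back together; all names here are examples only.
def _chunk_layer_example() -> torch.Tensor:
    def toy_layer(x: torch.Tensor) -> torch.Tensor:
        return x * 2

    inputs = {"x": torch.arange(4.0).reshape(4, 1)}
    # returns tensor([[0.], [2.], [4.], [6.]])
    return chunk_layer(toy_layer, inputs, chunk_size=2, no_batch_dims=1)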
| 718 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every way `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
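

# Illustrative companion (not in the original file): when only the *number* of
# constructions is needed, a top-down memoized counter is simpler than
# enumerating every decomposition as `all_construct` does above.
def count_construct(target: str, word_bank: list[str], memo: dict[str, int] | None = None) -> int:
    memo = {} if memo is None else memo
    if target in memo:
        return memo[target]
    if target == "":
        return 1
    total = 0
    for word in word_bank:
        if target.startswith(word):
            total += count_construct(target[len(word) :], word_bank, memo)
    memo[target] = total
    return total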
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 281 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue_with_xla_spawn(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 400 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuations unique to Japanese, so it is larger than raw_vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace(" ", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
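

# Illustrative sketch (not part of the original module): the byte fallback in
# `tokenize` above encodes any character outside the vocabulary as a run of
# "<|byteN|>" tokens, one per UTF-8 byte, which `convert_id_to_token` later
# reassembles with bytearray(...).decode("utf-8").
def _byte_fallback_example(char: str = "鬱") -> list:
    # e.g. "鬱" -> ['<|byte233|>', '<|byte172|>', '<|byte177|>']
    return ["<|byte%d|>" % b for b in char.encode("utf-8")]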
| 400 | 1 |
"""simple docstring"""
def binary_multiply(a: int, b: int) -> int:
    """Multiply `a` by `b` using only doubling and addition (Russian-peasant style)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Same as `binary_multiply`, but every partial sum is reduced modulo `c`."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
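

# Illustrative extension (not in the original file): the same square-and-add
# idea gives modular exponentiation when repeated addition is replaced by
# repeated multiplication.
def binary_mod_power(base: int, exponent: int, modulus: int) -> int:
    res = 1
    base %= modulus
    while exponent > 0:
        if exponent & 1:
            res = (res * base) % modulus
        base = (base * base) % modulus
        exponent >>= 1
    return res  # e.g. binary_mod_power(3, 5, 7) == 5, since 3**5 % 7 == 5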
| 256 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Creates a linked list from the elements of the given sequence and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set the first element as head, then append the rest.
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the entire linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
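

# Illustrative companion (not in the original file): an iterative reversal that
# avoids the recursion-depth limit `print_reverse` would hit on very long lists.
def reverse_linked_list(head):
    prev = None
    while head is not None:
        head.next, prev, head = prev, head, head.next
    return prev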
if __name__ == "__main__":
main()
| 256 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert at the head in descending order so the list ends up ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
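

# Illustrative alternative (not in the original file): because both inputs are
# already sorted, the merge can also be expressed with the standard library.
def merge_lists_via_heapq(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    from heapq import merge

    return SortedLinkedList(list(merge(sll_one, sll_two)))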
| 455 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU / swish activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
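

# Illustrative check (not in the original file): SiLU vanishes at 0 and
# approaches the identity for large positive inputs.
def _silu_sanity_check() -> None:
    x = np.array([-1.0, 0.0, 1.0])
    out = sigmoid_linear_unit(x)
    assert out[1] == 0.0
    assert np.allclose(out, x / (1 + np.exp(-x)))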
| 88 | 0 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 711 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given `length` that can be built from the partially filled `digits`."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
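

# Illustrative cross-check (not in the original file): a brute-force count for
# a small limit agrees with the digit-by-digit recursion above (there are 120
# reversible numbers below 10**3, per the Project Euler 145 statement).
def _brute_force_reversible(limit: int = 10**3) -> int:
    count = 0
    for n in range(1, limit):
        if n % 10 == 0:  # reversing would produce a leading zero
            continue
        total = n + int(str(n)[::-1])
        if all(d in "13579" for d in str(total)):
            count += 1
    return count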
| 108 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts MFSC features for one (unbatched) waveform vector."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 84 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
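
# Illustrative sketch (not part of the original __init__): the lazy-import
# pattern above defers heavy submodule imports until attribute access. A
# minimal stand-alone version of the idea (kept as a comment, since this
# module replaces itself in sys.modules at import time):
#
#     import importlib
#
#     class _Lazy:
#         def __init__(self, name, structure):
#             self._name, self._structure = name, structure
#
#         def __getattr__(self, attr):
#             for module, exports in self._structure.items():
#                 if attr in exports:
#                     return getattr(importlib.import_module(f".{module}", self._name), attr)
#             raise AttributeError(attr)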
| 286 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def _lowerCamelCase ( _a , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = False , ):
"""simple docstring"""
    _lowerCamelCase = bnb_quantization_config.load_in_4bit
    _lowerCamelCase = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
_lowerCamelCase = []
# custom device map
if isinstance(_a , _a ) and len(device_map.keys() ) > 1:
_lowerCamelCase = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_lowerCamelCase = get_keys_to_not_convert(_a )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
bnb_quantization_config.skip_modules.extend(_a )
_lowerCamelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_lowerCamelCase = []
_lowerCamelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_a )
# compatibility with peft
    _lowerCamelCase = load_in_4bit
    _lowerCamelCase = load_in_8bit
_lowerCamelCase = get_parameter_device(_a )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
_lowerCamelCase = replace_with_bnb_layers(_a , _a , modules_to_not_convert=_a )
# convert param to the right dtype
_lowerCamelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    _lowerCamelCase = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    _lowerCamelCase = getattr(_a , _a , _a )
                    if param is not None:
                        param.to(torch.float32 )
elif torch.is_floating_point(_a ):
param.to(_a )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
_lowerCamelCase = replace_with_bnb_layers(
_a , _a , modules_to_not_convert=_a )
_lowerCamelCase = get_quantized_model_device_map(
_a , _a , _a , max_memory=_a , no_split_module_classes=_a , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_lowerCamelCase = True
_lowerCamelCase = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
            _a , _a , _a , dtype=bnb_quantization_config.torch_dtype , offload_folder=_a , offload_state_dict=_a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(_a , device_map=_a , offload_dir=_a )
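# Hedged usage sketch for the quantize-and-dispatch entry point above. The public names
# `load_and_quantize_model` / `BnbQuantizationConfig` are assumptions about the original,
# de-obfuscated accelerate API (the def above is anonymized); the model class is hypothetical:
#
#   from accelerate import init_empty_weights
#   with init_empty_weights():
#       empty_model = MyModel(config)              # instantiated without allocating weights
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#   )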
def _lowerCamelCase ( _a , _a , _a=None , _a=None , _a=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
_lowerCamelCase = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(_a , _a ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
_lowerCamelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
            name: torch.float32
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_lowerCamelCase = {}
_lowerCamelCase = special_dtypes
_lowerCamelCase = no_split_module_classes
_lowerCamelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_lowerCamelCase = get_balanced_memory(
_a , low_zero=(device_map == '''balanced_low_0''') , max_memory=_a , **_a , )
_lowerCamelCase = max_memory
_lowerCamelCase = infer_auto_device_map(_a , **_a )
if isinstance(_a , _a ):
# check if don't have any quantized module on the cpu
_lowerCamelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_lowerCamelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
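# Shape of a custom device_map this helper accepts (module names are illustrative):
#
#   device_map = {"transformer": 0, "lm_head": "cpu"}
#
# Any module placed on "cpu"/"disk" is excluded from quantization by the filtering above,
# and 4-bit configs refuse such placements outright.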
def _lowerCamelCase ( _a , _a , _a=None , _a=None ):
"""simple docstring"""
if modules_to_not_convert is None:
_lowerCamelCase = []
_lowerCamelCase , _lowerCamelCase = _replace_with_bnb_layers(
_a , _a , _a , _a )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
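# Sketch of the replacement effect (assuming an 8-bit config): every eligible nn.Linear
# child is swapped for a bnb.nn.Linear8bitLt of the same shape, while names listed in
# `modules_to_not_convert` keep their original full-precision nn.Linear, e.g.:
#
#   model = replace_with_bnb_layers(model, bnb_config, modules_to_not_convert=["lm_head"])
#   # model.lm_head is still nn.Linear; other linear layers are now bnb modules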
def _lowerCamelCase ( _a , _a , _a=None , _a=None , ):
"""simple docstring"""
_lowerCamelCase = False
for name, module in model.named_children():
if current_key_name is None:
_lowerCamelCase = []
current_key_name.append(_a )
if isinstance(_a , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_lowerCamelCase = '''.'''.join(_a )
_lowerCamelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_lowerCamelCase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    _lowerCamelCase = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=_a , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    _lowerCamelCase = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
_lowerCamelCase = module.weight.data
if module.bias is not None:
_lowerCamelCase = module.bias.data
bnb_module.requires_grad_(_a )
setattr(_a , _a , _a )
_lowerCamelCase = True
if len(list(module.children() ) ) > 0:
_lowerCamelCase , _lowerCamelCase = _replace_with_bnb_layers(
_a , _a , _a , _a )
_lowerCamelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowerCamelCase ( _a ):
"""simple docstring"""
with init_empty_weights():
        _lowerCamelCase = deepcopy(_a )  # this has 0 cost since it is done inside `init_empty_weights` context manager
_lowerCamelCase = find_tied_parameters(_a )
# For compatibility with Accelerate < 0.18
if isinstance(_a , _a ):
_lowerCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCamelCase = sum(_a , [] )
_lowerCamelCase = len(_a ) > 0
# Check if it is a base model
_lowerCamelCase = False
if hasattr(_a , '''base_model_prefix''' ):
_lowerCamelCase = not hasattr(_a , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCamelCase = list(model.named_children() )
_lowerCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCamelCase = set(_a ) - set(_a )
_lowerCamelCase = list(set(_a ) ) + list(_a )
# remove ".weight" from the keys
_lowerCamelCase = ['''.weight''', '''.bias''']
_lowerCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCamelCase = name.replace(_a , '''''' )
filtered_module_names.append(_a )
return filtered_module_names
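# Illustrative output (the model is an assumption, not from this file): for a causal LM
# whose lm_head is tied to the input embeddings, the function above typically returns
# something like ["lm_head"], i.e. the output projection is kept out of quantization.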
def _lowerCamelCase ( _a ):
"""simple docstring"""
for m in model.modules():
        if isinstance(_a , bnb.nn.Linear4bit ):
return True
return False
def _lowerCamelCase ( _a ):
"""simple docstring"""
return next(parameter.parameters() ).device
def _lowerCamelCase ( _a , _a , _a , _a , _a , _a , _a ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_a , _a , 0 , dtype=_a , value=_a )
_lowerCamelCase = param_name
_lowerCamelCase = model
if "." in tensor_name:
_lowerCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
_lowerCamelCase = getattr(_a , _a )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
_lowerCamelCase = new_module
_lowerCamelCase = splits[-1]
# offload weights
_lowerCamelCase = False
offload_weight(module._parameters[tensor_name] , _a , _a , index=_a )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , _a , index=_a , )
else:
offload_weight(_a , _a , _a , index=_a )
offload_weight(_a , param_name.replace('''weight''' , '''SCB''' ) , _a , index=_a )
set_module_tensor_to_device(_a , _a , '''meta''' , dtype=_a , value=torch.empty(*param.size() ) )
| 297 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = test_results.split(''' ''' )
_lowerCamelCase = 0
_lowerCamelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_lowerCamelCase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
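# Worked example for the parser above (input shape assumed from a pytest summary line):
#   handle_test_results("3 failed, 104 passed in 1:02:03")
#   -> failed=3, success=104, time_spent="1:02:03"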
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = {}
_lowerCamelCase = None
_lowerCamelCase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , _a ):
_lowerCamelCase = True
_lowerCamelCase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
_lowerCamelCase = line
_lowerCamelCase = False
return failures
class __magic_name__ :
"""simple docstring"""
def __init__( self , a__ , a__ ):
_lowerCamelCase = title
_lowerCamelCase = doc_test_results['''time_spent'''].split(''',''' )[0]
_lowerCamelCase = doc_test_results['''success''']
_lowerCamelCase = doc_test_results['''failures''']
_lowerCamelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_lowerCamelCase = doc_test_results
@property
def _UpperCAmelCase ( self ):
_lowerCamelCase = [self._time_spent]
_lowerCamelCase = 0
for time in time_spent:
_lowerCamelCase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
_lowerCamelCase = [0, 0, time_parts[0]]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'''{int(a__ )}h{int(a__ )}m{int(a__ )}s'''
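    # e.g. a recorded "1:02:03" renders as "1h2m3s", and a sub-minute "12.34"
    # renders as "0h0m12s" (per the len(time_parts) == 1 branch above).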
@property
def _UpperCAmelCase ( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def _UpperCAmelCase ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def _UpperCAmelCase ( self ):
_lowerCamelCase = 40
        _lowerCamelCase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(v , dict )}
_lowerCamelCase = ''''''
for category, failures in category_failures.items():
            if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
            report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def _UpperCAmelCase ( self ):
_lowerCamelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
def _UpperCAmelCase ( ):
_lowerCamelCase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': payload} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=payload , )
def _UpperCAmelCase ( self ):
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
_lowerCamelCase = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
_lowerCamelCase = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=text , )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ ):
_lowerCamelCase = ''''''
for key, value in failures.items():
            _lowerCamelCase = value[:2_00] + ''' [Truncated]''' if len(value ) > 2_50 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
_lowerCamelCase = job_name
_lowerCamelCase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
_lowerCamelCase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase ( self ):
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
_lowerCamelCase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
        _lowerCamelCase = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                _lowerCamelCase = f'''*Num failures* :{len(job_result['failed'] )} \n'''
                _lowerCamelCase = job_result['''failures''']
                _lowerCamelCase = self.get_reply_blocks(job , job_link , failures , text=text )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f'''Results for {job}''' , blocks=blocks , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def _lowerCamelCase ( ):
"""simple docstring"""
_lowerCamelCase = os.environ['''GITHUB_RUN_ID''']
_lowerCamelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_lowerCamelCase = requests.get(_a ).json()
_lowerCamelCase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
_lowerCamelCase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
_lowerCamelCase = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
        print('''Unknown error, could not fetch links.''' , e )
return {}
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = {}
if os.path.exists(_a ):
_lowerCamelCase = os.listdir(_a )
for file in files:
try:
                with open(os.path.join(_a , file ) , encoding='''utf-8''' ) as f:
                    _lowerCamelCase = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(_a , file )}.''' ) from e
return _artifact
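# Hypothetical call: retrieve_artifact("doc_tests_gpu_test_reports") returns a dict such
# as {"stats": ..., "failures_short": ..., "summary_short": ...} when those files exist
# in the directory (the key names are taken from their use in __main__ below).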
def _lowerCamelCase ( ):
"""simple docstring"""
class __magic_name__ :
"""simple docstring"""
def __init__( self , a__ ):
_lowerCamelCase = name
_lowerCamelCase = []
def __str__( self ):
return self.name
def _UpperCAmelCase ( self , a__ ):
self.paths.append({'''name''': self.name, '''path''': path} )
_lowerCamelCase = {}
_lowerCamelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_lowerCamelCase = directory
if artifact_name not in _available_artifacts:
            _lowerCamelCase = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
_UpperCAmelCase = get_job_links()
_UpperCAmelCase = retrieve_available_artifacts()
_UpperCAmelCase = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_UpperCAmelCase = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_UpperCAmelCase = github_actions_job_links.get("run_doctests")
_UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_UpperCAmelCase = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = handle_test_results(artifact["stats"])
_UpperCAmelCase = failed
_UpperCAmelCase = success
_UpperCAmelCase = time_spent[1:-1] + ", "
_UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_UpperCAmelCase = line.replace("FAILED ", "")
_UpperCAmelCase = line.split()[0].replace("\n", "")
if "::" in line:
_UpperCAmelCase , _UpperCAmelCase = line.split("::")
else:
_UpperCAmelCase , _UpperCAmelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_UpperCAmelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
_UpperCAmelCase = failure
break
_UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 297 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class BlipTextConfig ( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "blip_text_model"
def __init__(self : List[str] , UpperCAmelCase_ : Any=30_524 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Tuple=1E-1_2 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Dict=30_522 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : List[Any]=102 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Any=True , **UpperCAmelCase_ : Optional[int] , ) ->Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , sep_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =vocab_size
lowerCamelCase__: List[str] =hidden_size
lowerCamelCase__: int =encoder_hidden_size
lowerCamelCase__: List[Any] =intermediate_size
lowerCamelCase__: List[str] =projection_dim
lowerCamelCase__: str =hidden_dropout_prob
lowerCamelCase__: int =num_hidden_layers
lowerCamelCase__: Union[str, Any] =num_attention_heads
lowerCamelCase__: List[str] =max_position_embeddings
lowerCamelCase__: Any =layer_norm_eps
lowerCamelCase__: Any =hidden_act
lowerCamelCase__: Tuple =initializer_range
lowerCamelCase__: Dict =attention_probs_dropout_prob
lowerCamelCase__: Dict =is_decoder
lowerCamelCase__: Dict =use_cache
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Any , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Any) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: str =cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type") == "blip":
lowerCamelCase__: List[str] =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_)
class BlipVisionConfig ( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "blip_vision_model"
def __init__(self : List[str] , UpperCAmelCase_ : Tuple=768 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Union[str, Any]=384 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : str=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : str=1E-1_0 , **UpperCAmelCase_ : Union[str, Any] , ) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =hidden_size
lowerCamelCase__: List[str] =intermediate_size
lowerCamelCase__: Dict =projection_dim
lowerCamelCase__: Any =num_hidden_layers
lowerCamelCase__: Tuple =num_attention_heads
lowerCamelCase__: Optional[int] =patch_size
lowerCamelCase__: Optional[int] =image_size
lowerCamelCase__: Any =initializer_range
lowerCamelCase__: str =attention_dropout
lowerCamelCase__: str =layer_norm_eps
lowerCamelCase__: Dict =hidden_act
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Tuple , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[Any]) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Dict =cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type") == "blip":
lowerCamelCase__: Optional[int] =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_)
class BlipConfig ( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "blip"
lowercase_ = True
def __init__(self : Any , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : str=2.6592 , UpperCAmelCase_ : List[Any]=256 , **UpperCAmelCase_ : Tuple , ) ->int:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if text_config is None:
lowerCamelCase__: Optional[int] ={}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
if vision_config is None:
lowerCamelCase__: Optional[int] ={}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")
        lowerCamelCase__: List[str] =BlipTextConfig(**text_config)
        lowerCamelCase__: Optional[int] =BlipVisionConfig(**vision_config)
lowerCamelCase__: List[str] =self.vision_config.hidden_size
lowerCamelCase__: Optional[Any] =projection_dim
lowerCamelCase__: List[str] =logit_scale_init_value
lowerCamelCase__: str =1.0
lowerCamelCase__: Tuple =0.02
lowerCamelCase__: int =image_text_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Any , UpperCAmelCase_ : BlipTextConfig , UpperCAmelCase_ : BlipVisionConfig , **UpperCAmelCase_ : str) ->Any:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: str =copy.deepcopy(self.__dict__)
lowerCamelCase__: Dict =self.text_config.to_dict()
lowerCamelCase__: List[str] =self.vision_config.to_dict()
lowerCamelCase__: int =self.__class__.model_type
return output
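# Minimal construction sketch using the three config classes above (keyword values are
# this file's own defaults, not a released checkpoint; `from_text_vision_configs` is
# assumed to be the original name of the obfuscated classmethod defined above):
#
#   text_cfg = BlipTextConfig(vocab_size=30_524, hidden_size=768)
#   vision_cfg = BlipVisionConfig(image_size=384, patch_size=16)
#   blip_cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)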
| 59 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_=None , A_=None ):
if attention_mask is None:
        lowerCAmelCase__ : Optional[int] = tf.cast(tf.math.not_equal(A_ , config.pad_token_id ) , tf.int8 )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = OPTConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : str ,lowercase_ : List[Any] ,lowercase_ : Optional[int]=1_3 ,lowercase_ : str=7 ,lowercase_ : int=True ,lowercase_ : List[str]=False ,lowercase_ : Optional[int]=9_9 ,lowercase_ : Union[str, Any]=1_6 ,lowercase_ : Any=2 ,lowercase_ : Tuple=4 ,lowercase_ : Union[str, Any]=4 ,lowercase_ : int="gelu" ,lowercase_ : Tuple=0.1 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : List[str]=2_0 ,lowercase_ : Union[str, Any]=2 ,lowercase_ : Union[str, Any]=1 ,lowercase_ : Tuple=0 ,lowercase_ : List[Any]=1_6 ,lowercase_ : Union[str, Any]=1_6 ,):
lowerCAmelCase__ : Dict = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Dict = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Optional[Any] = use_labels
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : Dict = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : int = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = eos_token_id
lowerCAmelCase__ : List[Any] = pad_token_id
lowerCAmelCase__ : Any = bos_token_id
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : int = word_embed_proj_dim
lowerCAmelCase__ : Union[str, Any] = False
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
lowerCAmelCase__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
lowerCAmelCase__ : int = tf.concat([input_ids, eos_tensor] ,axis=1 )
lowerCAmelCase__ : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=lowercase_ ,**self.config_updates ,)
lowerCAmelCase__ : Optional[Any] = prepare_opt_inputs_dict(lowercase_ ,lowercase_ )
return config, inputs_dict
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : int ,lowercase_ : List[Any] ):
lowerCAmelCase__ : Tuple = TFOPTModel(config=lowercase_ )
lowerCAmelCase__ : Tuple = inputs_dict['''input_ids''']
lowerCAmelCase__ : Optional[int] = input_ids[:1, :]
lowerCAmelCase__ : List[str] = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase__ : Any = 1
# first forward pass
lowerCAmelCase__ : Dict = model(lowercase_ ,attention_mask=lowercase_ ,use_cache=lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        lowerCAmelCase__ : Tuple = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.int8 )
# append to next input_ids and
lowerCAmelCase__ : Optional[Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
lowerCAmelCase__ : Tuple = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
lowerCAmelCase__ : Any = model(lowercase_ ,attention_mask=lowercase_ )[0]
lowerCAmelCase__ : Any = model(lowercase_ ,attention_mask=lowercase_ ,past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
lowerCAmelCase__ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
lowerCAmelCase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase__ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ ,lowercase_ ,rtol=1E-3 )
@require_tf
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowercase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowercase__ = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = 10
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : List[Any] = TFOPTModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=lowercase_ )
def __lowerCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ : Optional[Any] ,lowercase_ : List[Any] ):
if hasattr(lowercase_ ,'''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ ,'''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase__ : Optional[int] = model_class(config=lowercase_ )
lowerCAmelCase__ : List[Any] = _get_word_embedding_weight(lowercase_ ,model.get_input_embeddings() )
lowerCAmelCase__ : Optional[Any] = _get_word_embedding_weight(lowercase_ ,model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = _get_word_embedding_weight(lowercase_ ,model.get_input_embeddings() )
lowerCAmelCase__ : List[Any] = _get_word_embedding_weight(lowercase_ ,model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase__ : Optional[Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] ,lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase__ : Optional[Any] = True
                for pa, pb in zip(old_input_embeddings.value() ,new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
                        lowerCAmelCase__ : str = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] ,lowercase_ )
lowerCAmelCase__ : Dict = True
                    for pa, pb in zip(old_output_embeddings.value() ,new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
                            lowerCAmelCase__ : List[Any] = False
self.assertTrue(lowercase_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
    return tf.constant(A_ , dtype=tf.int32 )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = 99
def __lowerCAmelCase ( self : Any ):
        lowerCAmelCase__ : Dict = tf.ones((4, 1) ,dtype=tf.int32 ) * 2
lowerCAmelCase__ : Any = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 )
lowerCAmelCase__ : str = input_ids.shape[0]
lowerCAmelCase__ : str = OPTConfig(
vocab_size=self.vocab_size ,hidden_size=2_4 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=3_2 ,max_position_embeddings=4_8 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Tuple = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
lowerCAmelCase__ : str = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase__ : List[Any] = tf.not_equal(lowercase_ ,model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase__ : List[str] = model(input_ids=lowercase_ ,attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase__ : Optional[int] = (1, 1_1, 5_1_2)
self.assertEqual(output.shape ,lowercase_ )
lowerCAmelCase__ : Any = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] ,lowercase_ ,atol=4E-3 ) )
lowerCAmelCase__ : Any = tf.function(lowercase_ ,jit_compile=lowercase_ )
lowerCAmelCase__ : Optional[Any] = xla_generate(lowercase_ ,lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] ,lowercase_ ,atol=4E-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] ):
super().setUp()
lowerCAmelCase__ : Optional[int] = '''facebook/opt-350m'''
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[int] = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase__ : int = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase__ : Optional[Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ,padding=lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : int = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
lowerCAmelCase__ : List[str] = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowercase_ ,lowercase_ ,atol=1E-4 ) )
lowerCAmelCase__ : int = tf.function(lowercase_ ,jit_compile=lowercase_ )
lowerCAmelCase__ : Tuple = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
self.assertTrue(np.allclose(lowercase_ ,lowercase_ ,atol=1E-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self : List[str] ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Union[str, Any] = '''facebook/opt-125m'''
lowerCAmelCase__ : str = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : int = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase__ : int = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase__ : Union[str, Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Any = model.generate(lowercase_ ,max_length=1_0 )
lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Union[str, Any] = '''facebook/opt-350m'''
lowerCAmelCase__ : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase__ : Optional[Any] = '''left'''
# use different length sentences to test batching
lowerCAmelCase__ : Dict = [
'''Hello, my dog is a little''',
'''Today, I''',
]
lowerCAmelCase__ : Union[str, Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ,padding=lowercase_ )
lowerCAmelCase__ : Optional[Any] = inputs['''input_ids''']
lowerCAmelCase__ : str = model.generate(input_ids=lowercase_ ,attention_mask=inputs['''attention_mask'''] )
lowerCAmelCase__ : Optional[Any] = tokenizer(sentences[0] ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Optional[Any] = model.generate(input_ids=lowercase_ )
        lowerCAmelCase__ : Optional[int] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['''attention_mask'''][-1] ,tf.int64 ) )
lowerCAmelCase__ : Any = tokenizer(sentences[1] ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Optional[int] = model.generate(input_ids=lowercase_ ,max_length=model.config.max_length - num_paddings )
lowerCAmelCase__ : Any = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ )
lowerCAmelCase__ : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowercase_ )
lowerCAmelCase__ : Optional[Any] = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowercase_ )
lowerCAmelCase__ : List[Any] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowercase_ ,lowercase_ )
self.assertListEqual(lowercase_ ,[non_padded_sentence, padded_sentence] )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : List[Any] = '''facebook/opt-350m'''
lowerCAmelCase__ : Tuple = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowerCAmelCase__ : int = []
lowerCAmelCase__ : int = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase__ : List[str] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase__ : Optional[Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Optional[Any] = model.generate(lowercase_ ,max_length=1_0 )
lowerCAmelCase__ : str = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ ,lowercase_ )
| 450 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowercase ( UpperCAmelCase__ = 8 ):
"""simple docstring"""
__lowerCAmelCase = ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ ) )
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
i -= len(UpperCAmelCase__ )
__lowerCAmelCase = i // 3
__lowerCAmelCase = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    __lowerCAmelCase = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
__lowerCAmelCase = list(UpperCAmelCase__ )
shuffle(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
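# Worked example of the split above: with chars_incl="@1A" and i=8, five slots remain,
# so quotient=1 and remainder=2 -> 3 letters + 1 digit + 1 punctuation character are
# appended to "@1A" and the whole result is shuffled.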
# random is a generalised function for letters, characters and numbers
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
return "".join(secrets.choice(UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ ) )
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
pass # Put your code here...
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
pass # Put your code here...
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
pass # Put your code here...
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ = 8 ):
"""simple docstring"""
if len(UpperCAmelCase__ ) < min_length:
# Your Password must be at least 8 characters long
return False
__lowerCAmelCase = any(char in ascii_uppercase for char in password )
__lowerCAmelCase = any(char in ascii_lowercase for char in password )
__lowerCAmelCase = any(char in digits for char in password )
__lowerCAmelCase = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
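# Quick sanity check of the rules above (the def's original name is obfuscated here):
#   "Aa1!aaaa" -> True  (upper, lower, digit, special, length 8)
#   "abcdefgh" -> False (no uppercase, digit, or special character)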
def __lowercase ( ):
"""simple docstring"""
__lowerCAmelCase = int(input('Please indicate the max length of your password: ' ).strip() )
__lowerCAmelCase = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(UpperCAmelCase__ ) )
print(
'Alternative Password generated:' , alternative_password_generator(UpperCAmelCase__ , UpperCAmelCase__ ) , )
print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
main()
| 102 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=8 ):
"""simple docstring"""
__lowerCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowerCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
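# Worked example: with the default scale_factor=8, a 768x768 request maps through
# 768 // 8**2 = 12 latent cells per side and back to (12 * 8, 12 * 8) = (96, 96).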
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__=512 , UpperCAmelCase__=512 ):
"""simple docstring"""
__lowerCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__lowerCAmelCase = np.array(pil_image.convert('RGB' ) )
__lowerCAmelCase = arr.astype(np.floataa ) / 127.5 - 1
__lowerCAmelCase = np.transpose(UpperCAmelCase__ , [2, 0, 1] )
__lowerCAmelCase = torch.from_numpy(UpperCAmelCase__ ).unsqueeze(0 )
return image
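# e.g. prepare_image(pil_img, w=512, h=512) yields a float32 tensor of shape
# (1, 3, 512, 512) scaled to [-1, 1] (BICUBIC resize, then x / 127.5 - 1).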
class snake_case_ ( _a ):
"""simple docstring"""
def __init__( self , _A , _A , _A , ):
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
__lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A__ ( self , _A , _A , _A ):
# get the original timestep using init_timestep
__lowerCAmelCase = min(int(num_inference_steps * strength ) , _A )
__lowerCAmelCase = max(num_inference_steps - init_timestep , 0 )
__lowerCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def A__ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__lowerCAmelCase = image.to(device=_A , dtype=_A )
__lowerCAmelCase = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__lowerCAmelCase = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__lowerCAmelCase = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__lowerCAmelCase = torch.cat(_A , dim=0 )
else:
__lowerCAmelCase = self.movq.encode(_A ).latent_dist.sample(_A )
__lowerCAmelCase = self.movq.config.scaling_factor * init_latents
__lowerCAmelCase = torch.cat([init_latents] , dim=0 )
__lowerCAmelCase = init_latents.shape
__lowerCAmelCase = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__lowerCAmelCase = self.scheduler.add_noise(_A , _A , _A )
__lowerCAmelCase = init_latents
return latents
def A__ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase = torch.device(F"""cuda:{gpu_id}""" )
__lowerCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def A__ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCAmelCase, __lowerCAmelCase = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__lowerCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A__ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 5_1_2 , _A = 5_1_2 , _A = 1_0_0 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__lowerCAmelCase = self._execution_device
__lowerCAmelCase = guidance_scale > 1.0
if isinstance(_A , _A ):
__lowerCAmelCase = torch.cat(_A , dim=0 )
__lowerCAmelCase = image_embeds.shape[0]
if isinstance(_A , _A ):
__lowerCAmelCase = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase = image_embeds.repeat_interleave(_A , dim=0 )
__lowerCAmelCase = negative_image_embeds.repeat_interleave(_A , dim=0 )
__lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
__lowerCAmelCase = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
__lowerCAmelCase = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__lowerCAmelCase = image.to(dtype=image_embeds.dtype , device=_A )
__lowerCAmelCase = self.movq.encode(_A )['latents']
__lowerCAmelCase = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__lowerCAmelCase, __lowerCAmelCase = self.get_timesteps(_A , _A , _A )
__lowerCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__lowerCAmelCase, __lowerCAmelCase = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__lowerCAmelCase = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase = {'image_embeds': image_embeds}
__lowerCAmelCase = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase, __lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase, __lowerCAmelCase = noise_pred.chunk(2 )
__lowerCAmelCase, __lowerCAmelCase = variance_pred.chunk(2 )
__lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase, __lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__lowerCAmelCase = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__lowerCAmelCase = image * 0.5 + 0.5
__lowerCAmelCase = image.clamp(0 , 1 )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 102 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-bert"
SCREAMING_SNAKE_CASE = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
SCREAMING_SNAKE_CASE = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_file( self ):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors( self ):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
    def test_non_existence( self ):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))
        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
        self.assertIsNone(path)
        # This checks that we did call the fake head request
        mock_head.assert_called()
    def test_has_file( self ):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant( self ):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")
        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)
    def test_get_file_from_repo_local( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
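# A hedged usage sketch for the two helpers tested above (network access is
# needed on the first call; "config.json" as the filename is an assumption
# about the fixture repo's contents):
def _cache_sketch():
    config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
    print(config_path)  # resolves to a file inside the local HF cache
    # get_file_from_repo returns None instead of raising when the file is absent
    print(get_file_from_repo("hf-internal-testing/tiny-random-bert", "missing.txt"))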
| 579 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats( url = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
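# A small offline sketch of the XPath extraction above, applied to a static
# HTML fragment instead of the live page:
sample_html = (
    '<div class="maincounter-number"><span>1,000</span></div>'
    '<div class="maincounter-number"><span>50</span></div>'
    '<div class="maincounter-number"><span>900</span></div>'
)
sample_values = html.fromstring(sample_html).xpath('//div[@class = "maincounter-number"]/span/text()')
assert covid_data(*sample_values) == covid_data(cases="1,000", deaths="50", recovered="900")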
| 579 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
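# A small sketch of the fused-QKV split performed above: a weight whose first
# dimension stacks the K, V and Q blocks is cut into three equal parts along dim 0
# (the hidden size here is an illustrative stand-in):
def _qkv_split_sketch():
    hidden = 4
    qkv_weight = torch.randn(3 * hidden, hidden)  # fused projection, depth divisible by 3
    k, v, q = torch.split(qkv_weight, qkv_weight.shape[0] // 3, dim=0)
    assert k.shape == v.shape == q.shape == (hidden, hidden)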
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 249 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
def __init__( self : Optional[Any] , __snake_case : Tuple , __snake_case : Tuple=2 , __snake_case : List[str]=32 , __snake_case : List[str]=16 , __snake_case : Optional[Any]=3 , __snake_case : List[str]=True , __snake_case : str=True , __snake_case : Optional[Any]=32 , __snake_case : Optional[int]=4 , __snake_case : str=[0, 1, 2, 3] , __snake_case : List[str]=4 , __snake_case : int=37 , __snake_case : int="gelu" , __snake_case : Tuple=0.1 , __snake_case : int=0.1 , __snake_case : List[str]=0.02 , __snake_case : Any=3 , __snake_case : Tuple=[1, 384, 24, 24] , __snake_case : List[Any]=True , __snake_case : List[Any]=None , ) -> str:
_a : List[Any] = parent
_a : str = batch_size
_a : Dict = image_size
_a : str = patch_size
_a : Union[str, Any] = num_channels
_a : Dict = is_training
_a : Union[str, Any] = use_labels
_a : List[str] = hidden_size
_a : Dict = num_hidden_layers
_a : List[str] = backbone_out_indices
_a : Any = num_attention_heads
_a : str = intermediate_size
_a : List[Any] = hidden_act
_a : Dict = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : Tuple = initializer_range
_a : Dict = num_labels
_a : Any = backbone_featmap_shape
_a : List[Any] = scope
_a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_a : str = (image_size // patch_size) ** 2
_a : Union[str, Any] = num_patches + 1
def snake_case_ ( self : Any ) -> Optional[int]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self : Union[str, Any] ) -> List[str]:
_a : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__snake_case , backbone_featmap_shape=self.backbone_featmap_shape , )
def snake_case_ ( self : List[Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict ) -> int:
_a : Optional[int] = DPTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : List[Any] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : List[str] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[Any] ) -> Any:
_a : Any = self.num_labels
_a : int = DPTForDepthEstimation(__snake_case )
model.to(__snake_case )
model.eval()
_a : Optional[int] = model(__snake_case )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def snake_case_ ( self : Any , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Dict ) -> Optional[Any]:
_a : Optional[Any] = self.num_labels
_a : Any = DPTForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
_a : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case_ ( self : Union[str, Any] ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def snake_case_ ( self : Union[str, Any] ) -> List[Any]:
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
def snake_case_ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def snake_case_ ( self : str ) -> str:
pass
def snake_case_ ( self : int ) -> Optional[int]:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case_ ( self : int ) -> Any:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(__snake_case )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case_ ( self : int ) -> Any:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : List[Any] ) -> Optional[int]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__snake_case )
def snake_case_ ( self : int ) -> Any:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case )
def snake_case_ ( self : Union[str, Any] ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Tuple = True
if model_class in get_values(__snake_case ):
continue
_a : List[str] = model_class(__snake_case )
model.to(__snake_case )
model.train()
_a : Tuple = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
_a : Tuple = model(**__snake_case ).loss
loss.backward()
def snake_case_ ( self : Any ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_a : str = False
_a : int = True
if model_class in get_values(__snake_case ) or not model_class.supports_gradient_checkpointing:
continue
_a : Tuple = model_class(__snake_case )
model.to(__snake_case )
model.gradient_checkpointing_enable()
model.train()
_a : str = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
_a : Tuple = model(**__snake_case ).loss
loss.backward()
def snake_case_ ( self : Tuple ) -> Optional[Any]:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Optional[int] = _config_zero_init(__snake_case )
for model_class in self.all_model_classes:
_a : Tuple = model_class(config=__snake_case )
# Skip the check for the backbone
_a : List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_a : str = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case_ ( self : Dict ) -> Optional[Any]:
pass
@slow
def snake_case_ ( self : str ) -> str:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_a : Optional[int] = DPTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case_ ( self : Any ) -> Union[str, Any]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Tuple = '''add'''
with self.assertRaises(__snake_case ):
_a : List[str] = DPTForDepthEstimation(__snake_case )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
def snake_case_ ( self : Any ) -> Optional[Any]:
_a : Any = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
_a : Union[str, Any] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__snake_case )
_a : Optional[int] = prepare_img()
_a : Any = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
_a : List[str] = model(**__snake_case )
_a : Optional[Any] = outputs.predicted_depth
# verify the predicted depth
_a : Optional[Any] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __snake_case )
_a : int = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __snake_case , atol=1E-4 ) )
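# A hedged sketch of converting `predicted_depth` into an 8-bit grayscale map
# for visualization (the min-max normalization scheme here is an assumption,
# not part of the integration test above):
def _depth_to_uint8(predicted_depth ):
    depth = (predicted_depth - predicted_depth.min()) / (predicted_depth.max() - predicted_depth.min())
    return (depth.squeeze(0 ) * 255).to(torch.uint8 ).numpy()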
| 249 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig ):
"""simple docstring"""
    model_type = 'yolos'
def __init__( self : Union[str, Any] ,lowercase__ : Any=7_6_8 ,lowercase__ : Optional[Any]=1_2 ,lowercase__ : Optional[Any]=1_2 ,lowercase__ : List[Any]=3_0_7_2 ,lowercase__ : List[Any]="gelu" ,lowercase__ : Optional[int]=0.0 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0_2 ,lowercase__ : Any=1e-1_2 ,lowercase__ : Optional[Any]=[5_1_2, 8_6_4] ,lowercase__ : Union[str, Any]=1_6 ,lowercase__ : Optional[int]=3 ,lowercase__ : Any=True ,lowercase__ : str=1_0_0 ,lowercase__ : int=True ,lowercase__ : str=False ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=5 ,lowercase__ : Dict=2 ,lowercase__ : int=5 ,lowercase__ : int=2 ,lowercase__ : Optional[Any]=0.1 ,**lowercase__ : int ,):
super().__init__(**lowercase__ )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class YolosOnnxConfig(OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return 1e-4
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return 1_2
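# A brief usage sketch for the config class defined above (a hypothetical
# call; assumes the constructor mirrors the upstream transformers `YolosConfig`):
def _yolos_config_sketch():
    config = YolosConfig(num_detection_tokens=100 )
    assert config.model_type == "yolos" and config.num_detection_tokens == 100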
| 41 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue( line , indent ):
    """simple docstring"""
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , line ) is not None
def find_code_in_diffusers( object_name ):
    """simple docstring"""
    parts = object_name.split('.' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(F' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase_ = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent( code ):
    """simple docstring"""
    lines = code.split('\n' )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
    return ""
def blackify( code ):
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = F'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len('class Bla:\n' ) :] if has_indent else result
def is_copy_consistent( filename , overwrite=False ):
    """simple docstring"""
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(F'^{indent}# End copy' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '\n'.join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('with' , '' ).split(',' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
        theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(F'Detected changes, rewriting {filename}.' )
        with open(filename , 'w' , encoding='utf-8' , newline='\n' ) as f:
            f.writelines(lines )
    return diffs
def check_copies( overwrite = False ):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , '**/*.py' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '\n'.join(diffs )
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
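# A quick sketch of the `# Copied from` marker format that `_re_copy_warning`
# above is built to parse (synthetic example line, not from the diffusers tree):
def _copied_from_sketch():
    line = "    # Copied from diffusers.models.attention.Attention with Attention->CrossAttention"
    indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
    assert object_name == "models.attention.Attention"
    assert replace_pattern == "with Attention->CrossAttention"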
| 74 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file( vocab_file ) ->List[Any]:
    """simple docstring"""
    with open(vocab_file , 'r' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str="<unk>" , lowerCAmelCase__ : Tuple="<cls>" , lowerCAmelCase__ : Tuple="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Dict="<eos>" , **lowerCAmelCase__ : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase)
SCREAMING_SNAKE_CASE = load_vocab_file(_lowerCAmelCase)
SCREAMING_SNAKE_CASE = dict(enumerate(self.all_tokens))
SCREAMING_SNAKE_CASE = {tok: ind for ind, tok in enumerate(self.all_tokens)}
SCREAMING_SNAKE_CASE = unk_token
SCREAMING_SNAKE_CASE = cls_token
SCREAMING_SNAKE_CASE = pad_token
SCREAMING_SNAKE_CASE = mask_token
SCREAMING_SNAKE_CASE = eos_token
SCREAMING_SNAKE_CASE = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def __UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : Dict) -> List[str]:
"""simple docstring"""
return self._id_to_token.get(_lowerCAmelCase , self.unk_token)
def __UpperCamelCase ( self : str , lowerCAmelCase__ : Tuple) -> Optional[int]:
"""simple docstring"""
return self._token_to_id.get(_lowerCAmelCase , self._token_to_id.get(self.unk_token))
def __UpperCamelCase ( self : str , lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[Any]) -> List[Any]:
"""simple docstring"""
return text.split()
def __UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Dict=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def __UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return self._token_to_id.get(_lowerCAmelCase , self._token_to_id.get(self.unk_token))
def __UpperCamelCase ( self : Any , lowerCAmelCase__ : str) -> Dict:
"""simple docstring"""
return self._id_to_token.get(_lowerCAmelCase , self.unk_token)
def __UpperCamelCase ( self : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] = None) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] = None , lowerCAmelCase__ : int = False) -> List[Any]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
SCREAMING_SNAKE_CASE = [1] + ([0] * len(_lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(_lowerCAmelCase) + [1]
return mask
def __UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = os.path.join(_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(_lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def __UpperCamelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=_lowerCAmelCase)
def __UpperCamelCase ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] = False) -> Any:
"""simple docstring"""
return super()._add_tokens(_lowerCAmelCase , special_tokens=_lowerCAmelCase)
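# A small sketch of the single-sequence layout produced by
# `build_inputs_with_special_tokens` above: <cls> ids <eos>
# (the token ids here are illustrative, not the real ESM vocabulary):
def _esm_layout_sketch():
    cls_id, eos_id = 0, 2
    token_ids = [5, 6, 7]
    assert [cls_id] + token_ids + [eos_id] == [0, 5, 6, 7, 2]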
| 709 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result , args ):
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )
    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )
    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str )
    with open(f'{dataset_id}_eval_results.txt' , 'w' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f'{i}' + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(f'{i}' + '\n' )
                t.write(batch['target'] + '\n' )
            result.map(write_to_file , with_indices=True )
def normalize_text( text ) ->str:
    """simple docstring"""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', ' ', ' ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )
    return text
def main( args ):
    """simple docstring"""
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
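# A quick sketch of the normalization performed by `normalize_text` above on a
# synthetic sentence (self-contained re-implementation for illustration):
import re as _re
_sample = "Hello,\n\nWORLD! How are you?"
_sample = _re.sub('[,?.!\-\;\:"“%‘”�—’…–]' , '' , _sample.lower() )
for _t in ['\n\n', '\n', ' ', ' ']:
    _sample = ' '.join(_sample.split(_t ) )
assert _sample == "hello world how are you"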
| 259 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
'''simple docstring'''
    def __init__( self ) -> Any:
        """simple docstring"""
        super().__init__()
        self.lineara = nn.Linear(3, 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.lineara = nn.Linear(4, 5 )
    def forward( self, x ) -> int:
        """simple docstring"""
        return self.lineara(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook( ModelHook ):
    '''simple docstring'''
    def pre_forward( self, module, *args, **kwargs ) -> Any:
        """simple docstring"""
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
    '''simple docstring'''
    def post_forward( self, module, output ) -> Tuple:
        """simple docstring"""
        return output + 1
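# A compact sketch of the hook mechanism exercised by the tests below: a
# pre-forward hook shifts the input before a plain nn.Linear sees it.
def _hook_sketch():
    layer = nn.Linear(3, 3)
    add_hook_to_module(layer, PreForwardHook())
    out_hooked = layer(torch.zeros(2, 3))   # the hook turns zeros into ones
    remove_hook_from_module(layer)
    out_plain = layer(torch.ones(2, 3))     # same effective input, no hook
    assert torch.allclose(out_hooked, out_plain, atol=1e-5)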
class HooksModelTester( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = ModelForTest()
lowercase_ : Tuple = ModelHook()
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
self.assertEqual(test_model._hf_hook, __lowerCAmelCase )
self.assertTrue(hasattr(__lowerCAmelCase, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(__lowerCAmelCase )
self.assertFalse(hasattr(__lowerCAmelCase, """_hf_hook""" ) )
self.assertFalse(hasattr(__lowerCAmelCase, """_old_forward""" ) )
def snake_case__ ( self ) -> str:
"""simple docstring"""
lowercase_ : Union[str, Any] = ModelForTest()
lowercase_ : Any = ModelHook()
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase, append=__lowerCAmelCase )
self.assertEqual(isinstance(test_model._hf_hook, __lowerCAmelCase ), __lowerCAmelCase )
self.assertEqual(len(test_model._hf_hook.hooks ), 2 )
self.assertTrue(hasattr(__lowerCAmelCase, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(__lowerCAmelCase )
self.assertFalse(hasattr(__lowerCAmelCase, """_hf_hook""" ) )
self.assertFalse(hasattr(__lowerCAmelCase, """_old_forward""" ) )
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
lowercase_ : Optional[int] = ModelForTest()
lowercase_ : Union[str, Any] = torch.randn(2, 3 )
lowercase_ : int = test_model(x + 1 )
lowercase_ : List[Any] = test_model(x + 2 )
lowercase_ : Optional[int] = PreForwardHook()
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
lowercase_ : Dict = test_model(__lowerCAmelCase )
self.assertTrue(torch.allclose(__lowerCAmelCase, __lowerCAmelCase, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase_ : Tuple = PreForwardHook()
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
lowercase_ : List[str] = test_model(__lowerCAmelCase )
self.assertTrue(torch.allclose(__lowerCAmelCase, __lowerCAmelCase, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase_ : Dict = SequentialHook(PreForwardHook(), PreForwardHook() )
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
lowercase_ : Union[str, Any] = test_model(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase, __lowerCAmelCase, atol=1E-5 )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Dict = ModelForTest()
lowercase_ : int = torch.randn(2, 3 )
lowercase_ : Union[str, Any] = test_model(__lowerCAmelCase )
lowercase_ : List[str] = PostForwardHook()
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
lowercase_ : List[Any] = test_model(__lowerCAmelCase )
self.assertTrue(torch.allclose(__lowerCAmelCase, output + 1, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase_ : int = PostForwardHook()
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
lowercase_ : List[Any] = test_model(__lowerCAmelCase )
self.assertTrue(torch.allclose(__lowerCAmelCase, output + 1, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase_ : int = SequentialHook(PostForwardHook(), PostForwardHook() )
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
lowercase_ : Optional[Any] = test_model(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase, output + 2, atol=1E-5 )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
lowercase_ : Union[str, Any] = ModelForTest()
lowercase_ : Optional[Any] = torch.randn(2, 3 )
lowercase_ : List[Any] = test_model(__lowerCAmelCase )
lowercase_ : Optional[int] = PostForwardHook()
add_hook_to_module(__lowerCAmelCase, __lowerCAmelCase )
lowercase_ : Tuple = test_model(__lowerCAmelCase )
self.assertTrue(torch.allclose(__lowerCAmelCase, output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowercase_ : Tuple = True
lowercase_ : Optional[int] = test_model(__lowerCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device(0 ) )
self.assertEqual(model.lineara.weight.device, torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowercase_ : Optional[Any] = torch.randn(2, 3 )
lowercase_ : List[str] = model(__lowerCAmelCase )
self.assertEqual(output.device, torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__lowerCAmelCase, AlignDevicesHook(io_same_device=__lowerCAmelCase ) )
lowercase_ : Tuple = torch.randn(2, 3 ).to(0 )
lowercase_ : int = model(__lowerCAmelCase )
self.assertEqual(output.device, torch.device(0 ) )
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowercase_ : int = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara, AlignDevicesHook(**__lowerCAmelCase ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**__lowerCAmelCase ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**__lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase_ : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device, __lowerCAmelCase )
lowercase_ : Optional[int] = torch.randn(2, 3 )
lowercase_ : str = model(__lowerCAmelCase )
self.assertEqual(output.device, __lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowercase_ : Optional[Any] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara, AlignDevicesHook(**__lowerCAmelCase ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**__lowerCAmelCase ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**__lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
lowercase_ : List[Any] = torch.randn(2, 3 )
lowercase_ : str = model(__lowerCAmelCase )
self.assertEqual(output.device, __lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowercase_ : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__lowerCAmelCase, execution_device=__lowerCAmelCase, offload=__lowerCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase_ : Optional[Any] = torch.device(__lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device, __lowerCAmelCase )
lowercase_ : List[Any] = torch.randn(2, 3 )
lowercase_ : Any = model(__lowerCAmelCase )
self.assertEqual(output.device, __lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowerCAmelCase )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__lowerCAmelCase, execution_device=__lowerCAmelCase, offload=__lowerCAmelCase, offload_buffers=__lowerCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
lowercase_ : Any = torch.randn(2, 3 )
lowercase_ : Optional[int] = model(__lowerCAmelCase )
self.assertEqual(output.device, __lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowerCAmelCase )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
lowercase_ : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowercase_ : Optional[Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__lowerCAmelCase, execution_device=__lowerCAmelCase, offload=__lowerCAmelCase, weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase_ : Union[str, Any] = torch.device(__lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device, __lowerCAmelCase )
lowercase_ : int = torch.randn(2, 3 )
lowercase_ : Any = model(__lowerCAmelCase )
self.assertEqual(output.device, __lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowerCAmelCase )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__lowerCAmelCase, execution_device=__lowerCAmelCase, offload=__lowerCAmelCase, weights_map=model.state_dict(), offload_buffers=__lowerCAmelCase, )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
lowercase_ : List[str] = torch.randn(2, 3 )
lowercase_ : int = model(__lowerCAmelCase )
self.assertEqual(output.device, __lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowerCAmelCase )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
 | 458 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig( PretrainedConfig ):
    model_type = 'deta'
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=900 , __lowerCAmelCase=2048 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=1024 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1.0 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="sine" , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=True , __lowerCAmelCase=300 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.2_5 , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowercase = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
lowercase = backbone_config
lowercase = num_queries
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = auxiliary_loss
lowercase = position_embedding_type
# deformable attributes
lowercase = num_feature_levels
lowercase = encoder_n_points
lowercase = decoder_n_points
lowercase = two_stage
lowercase = two_stage_num_proposals
lowercase = with_box_refine
lowercase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = eos_coefficient
lowercase = focal_alpha
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A__ ( self ):
"""simple docstring"""
return self.d_model
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
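# A brief usage sketch for the config defined above (a hypothetical call; note
# the validation above requires `with_box_refine=True` whenever `two_stage=True`):
def _deta_config_sketch():
    config = DetaConfig(two_stage=True , with_box_refine=True )
    assert config.model_type == "deta"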
| 359 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1024,
    'moussaKam/barthez': 1024,
    'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        '''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
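# A minimal usage sketch (editor-added, not part of the original module). The
# file paths are hypothetical; any BARThez sentencepiece model / tokenizer.json
# pair would do:
#
#     tok = BarthezTokenizerFast(vocab_file='sentencepiece.bpe.model', tokenizer_file='tokenizer.json')
#     single = tok.build_inputs_with_special_tokens([10, 11])        # <s> A </s>
#     pair = tok.build_inputs_with_special_tokens([10, 11], [12])    # <s> A </s></s> B </s>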
| 516 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    '''simple docstring'''
    def __init__(self, args):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
    def forward(self, x):
        '''simple docstring'''
        # Bx3xHxW -> Bx2048xPxQ (pooled) -> flattened and transposed to BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
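# Editor-added shape note: with args.num_image_embeds == 3 the pooling grid is
# POOLING_BREAKDOWN[3] == (3, 1), so a batch of images comes out as a
# (batch, 3, 2048) tensor -- three 2048-d "image tokens" per example.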
class JsonlDataset(Dataset):
    '''simple docstring'''
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        '''simple docstring'''
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
    def __len__(self):
        '''simple docstring'''
        return len(self.data)
    def __getitem__(self, index):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)
        return {
            'image_start_token': start_token,
            'image_end_token': end_token,
            'sentence': sentence,
            'image': image,
            'label': label,
        }
    def get_label_frequencies(self):
        '''simple docstring'''
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'])
        return label_freqs
def collate_fn(batch):
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
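# A minimal wiring sketch (editor-added). The jsonl path and batch size are
# hypothetical; `tokenizer` is assumed to be a HF tokenizer from the caller:
#
#     dataset = JsonlDataset('train.jsonl', tokenizer, get_image_transforms(), get_mmimdb_labels(), 512)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)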
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 516 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
masked_input = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
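# Editor-added note: each element of the printed list is a
# (filled_sentence, probability, predicted_token) triple, e.g. something like
# ("Le camembert est delicieux :)", 0.49, "delicieux") for the top prediction
# (the exact tokens and scores depend on the checkpoint).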
| 382 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
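# Editor-added note: with the _LazyModule registration above, a user-facing
# import such as the sketch below only materializes the heavy torch modules on
# first attribute access:
#
#     from transformers import XLMRobertaXLForMaskedLM  # resolved lazily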
| 560 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"})
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext'])) | 698 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
    from PIL import Image
    from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class LevitModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops)
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]))
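    # Editor-added worked example of the shape arithmetic above: with the
    # default image_size=64, kernel_size=3, stride=2, padding=1, each of the
    # four stem convolutions maps s -> floor((s + 2 - 3) / 2 + 1), i.e.
    # 64 -> 32 -> 16 -> 8 -> 4, and the assertion's ceil(4/4) * ceil(4/4) = 1
    # token of width hidden_sizes[-1] is the expected final sequence length.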
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _snake_case ( self : Tuple ):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    ((height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride)
                    + 1)
                width = floor(
                    ((width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride)
                    + 1)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : int ):
pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == 'LevitForImageClassificationWithTeacher':
                del inputs_dict['labels']
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == 'LevitForImageClassificationWithTeacher'
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == 'LevitForImageClassificationWithTeacher':
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == 'LevitForImageClassificationWithTeacher'
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type['num_labels'] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1, problem_type['num_labels'])
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if 'Using a target size that is different to the input size' in str(w.message):
                            raise ValueError(
                                f'Something is going wrong in the regression problem: intercepted {w.message}')
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 698 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
    from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_rescale': self.do_rescale,
            'rescale_factor': self.rescale_factor,
            'do_pad': self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
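    # Editor-added worked example: with shortest_edge == 18, a 40x80 (w x h)
    # image has w < h, so expected_height = int(18 * 80 / 40) = 36 and
    # expected_width = 18 -- the short side is pinned to 18 and the aspect
    # ratio is preserved.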
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size)) | 25 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs) | 646 | 0 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    """simple docstring"""
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 112 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 112 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])
    @property
    def size(self):
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
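    # Editor-added usage sketch (hypothetical 4-vertex graph, not part of the
    # original module): 0-1 BFS finds the cheapest path 0 -> 3 -> 2 of cost 1.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 2, 0)
    print(g.get_shortest_path(0, 2))  # 1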
| 583 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        # No specific FOR_XXX available yet
    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
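# A minimal usage sketch (editor-added; the checkpoint name is an assumption,
# any CLAP-style model with matched audio/text towers should work):
#
#     from transformers import pipeline
#     classifier = pipeline('zero-shot-audio-classification', model='laion/clap-htsat-unfused')
#     classifier('dog_bark.wav', candidate_labels=['Sound of a dog', 'Sound of vacuum cleaner'])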
| 583 | 1 |
def is_automorphic_number(number: int) -> bool:
    # An automorphic number is one whose square ends in the number itself.
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
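    # Editor-added worked example: 76 is automorphic because 76**2 == 5776
    # ends in 76, while 7 is not (7**2 == 49 does not end in 7).
    print(is_automorphic_number(76), is_automorphic_number(7))  # True False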
| 713 |
from manim import *
class CheckpointLoadingScene(Scene):
    def construct(self):
__lowerCamelCase: int = Rectangle(height=0.5 , width=0.5 )
__lowerCamelCase: List[str] = Rectangle(height=0.25 , width=0.25 )
__lowerCamelCase: Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCamelCase: str = [mem.copy() for i in range(6 )]
__lowerCamelCase: Dict = [mem.copy() for i in range(6 )]
__lowerCamelCase: Union[str, Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Tuple = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Optional[int] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Dict = Text("""CPU""" , font_size=24 )
__lowerCamelCase: Any = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[int] = [mem.copy() for i in range(4 )]
__lowerCamelCase: Optional[int] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Dict = Text("""GPU""" , font_size=24 )
__lowerCamelCase: Dict = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Any = [mem.copy() for i in range(6 )]
__lowerCamelCase: Dict = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: List[Any] = Text("""Model""" , font_size=24 )
__lowerCamelCase: Any = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Tuple = []
__lowerCamelCase: Any = []
__lowerCamelCase: int = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
rect.set_stroke(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
self.add(SCREAMING_SNAKE_CASE_ )
model_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = [mem.copy() for i in range(6 )]
__lowerCamelCase: Any = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Tuple = Text("""Loaded Checkpoint""" , font_size=24 )
__lowerCamelCase: Tuple = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
        ckpt_arr = []
        ckpt_cpu_arr = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase: Optional[int] = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
target.move_to(SCREAMING_SNAKE_CASE_ )
ckpt_arr.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCamelCase: Dict = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[int] = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Tuple = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowerCamelCase: List[Any] = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase: Optional[int] = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase: str = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Optional[int] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: List[str] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Dict = Text("""Disk""" , font_size=24 )
__lowerCamelCase: Any = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , Write(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) )
        animations = []
        # Editor note: the iterated collection below is inferred from the
        # surrounding scene; the original obfuscated name could not be recovered.
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
self.play(*SCREAMING_SNAKE_CASE_ )
self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase: List[Any] = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) )
self.play(
FadeOut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) , )
self.wait()
| 189 | 0 |
def is_pentagonal(n: int) -> bool:
    '''simple docstring'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
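# Editor-added sanity check (not in the original): P_5 = 5 * (3 * 5 - 1) / 2 = 35,
# and the inverse formula gives (1 + sqrt(1 + 24 * 35)) / 6 = (1 + 29) / 6 = 5.
assert is_pentagonal(35) and not is_pentagonal(36)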
def solution(limit: int = 5000) -> int:
    '''simple docstring'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 558 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    '''simple docstring'''
    return "\n".join(
        f'{number} * {i} = {number * i}' for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
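# Editor-added expected output for the call above (first three lines):
#   5 * 1 = 5
#   5 * 2 = 10
#   5 * 3 = 15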
| 72 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    '''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down(self):
        '''simple docstring'''
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        '''simple docstring'''
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained('gpt2')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained('gpt2')
            # This check we did call the fake head request
            mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
__A = tempfile.mktemp()
with open(_lowerCamelCase, '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', _lowerCamelCase )
__A = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''', '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''', _lowerCamelCase )
__A = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
__A = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
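# Minimal standalone sketch of the Trie behavior exercised above (uses the same
# `transformers.tokenization_utils.Trie` imported at the top of this file; the
# demo strings are illustrative, mirroring the test cases):
if __name__ == "__main__":
    demo_trie = Trie()
    demo_trie.add("[CLS]")
    demo_trie.add("extra_id_100")
    # The longest added match wins, and unmatched text is kept intact:
    print(demo_trie.split("[CLS] This is a extra_id_100"))
    # -> ['[CLS]', ' This is a ', 'extra_id_100']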
| 215 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the block number in `key` by subtracting `offset`, and swap
    `original_name` for `new_name`."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
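# Illustrative example of the renaming above (the key is a hypothetical
# PoolFormer checkpoint entry, not read from a real state dict):
# "network.0.0.mlp.fc1.weight" -> "poolformer.encoder.0.0.mlp.fc1.weight"
# -> replace_key_with_offset(...) -> "poolformer.encoder.block.0.0.output.conv1.weight"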
def prepare_img():
    """Download a test image that the converted model's predictions are verified on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint into the HuggingFace format."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowercase_ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
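# Example invocation (the script file name and the paths are illustrative, not
# taken from the original script):
#
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf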
| 215 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowerCAmelCase__ , n - 1 , lowerCAmelCase__ ) * a) % mod
else:
UpperCAmelCase_ = binary_exponentiation(lowerCAmelCase__ , n / 2 , lowerCAmelCase__ )
return (b * b) % mod
# a prime number
lowerCamelCase = 701
lowerCamelCase = 1_000_000_000
lowerCamelCase = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
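# Sanity check added for illustration (not in the original script): the
# recursive routine should agree with Python's built-in three-argument pow().
assert binary_exponentiation(2, 100, p) == pow(2, 100, p)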
| 82 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """Configuration class to store the configuration of a UniSpeechSat model."""

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
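# Minimal usage sketch (illustrative; 320 follows from the default
# conv_stride (5, 2, 2, 2, 2, 2, 2) via the property above):
#
#     config = UniSpeechSatConfig()
#     assert config.inputs_to_logits_ratio == 320  # 5 * 2**6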
| 278 | 0 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than 4 characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 604 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers of equal length and returns the Manhattan
    (taxicab) distance between them.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise ValueError for an empty input and TypeError for non-numeric input."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Version of manhattan_distance written as a single expression over zip().

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 604 | 1 |