from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding the strongly connected components (SCCs)
    of a directed graph given as an adjacency list.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list with n vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
torch.manual_seed(0 )
_UpperCamelCase : Dict = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
torch.manual_seed(0 )
_UpperCamelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__A )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : str = self.dummy_uncond_unet
_UpperCamelCase : str = DDIMScheduler()
_UpperCamelCase : Optional[int] = self.dummy_vq_model
_UpperCamelCase : Tuple = LDMPipeline(unet=__A , vqvae=__A , scheduler=__A )
ldm.to(__A )
ldm.set_progress_bar_config(disable=__A )
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = ldm(generator=__A , num_inference_steps=2 , output_type="numpy" ).images
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : Tuple = ldm(generator=__A , num_inference_steps=2 , output_type="numpy" , return_dict=__A )[0]
_UpperCamelCase : int = image[0, -3:, -3:, -1]
_UpperCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : Dict = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_UpperCamelCase : Any = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : str = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(__A )
ldm.set_progress_bar_config(disable=__A )
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : Any = ldm(generator=__A , num_inference_steps=5 , output_type="numpy" ).images
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase : Dict = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_UpperCamelCase : Optional[int] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the reversible numbers of the given length, filling ``digits`` from
    the outermost pair inwards while tracking the carry in ``remainder``.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers over every length up to ``max_power``."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of items from ``array`` summing to ``target`` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, computed with top-down dynamic programming (memoization)."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed with bottom-up dynamic programming."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
# A quine: a program whose output is exactly its own source code.
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
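
# Usage sketch (added for illustration): assuming this script is saved as
# convert_wavlm.py, with hypothetical placeholder paths for the fairseq
# checkpoint and the output folder:
#
#   python convert_wavlm.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path /path/to/wavlm-base-converted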
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Treats the curve as a collection of linear segments and sums the areas of
    the trapezium shapes they form.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
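
# Usage sketch (added for illustration; not part of the original module):
# round-trips a message through encode()/decode() with an arbitrary example key.
# prepare_input() upcases the text, drops non-letters, and pads odd-length input
# with "X", so decode() returns that prepared form ("HIDETHEGOLDX") rather than
# the raw message.
if __name__ == "__main__":
    key = "monarchy"
    message = "Hide the gold"
    ciphertext = encode(message, key)
    print(f"Encoded: {ciphertext}")
    print(f"Decoded: {decode(ciphertext, key)}")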
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class a ( lowerCAmelCase_ ):
@require_torch
def UpperCamelCase_ ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
lowercase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
lowercase = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
lowercase = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(snake_case_ )
BertModel.from_pretrained(snake_case_ )
BertTokenizer.from_pretrained(snake_case_ )
pipeline(task='fill-mask' , model=snake_case_ )
# baseline - just load from_pretrained with normal network
lowercase = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
lowercase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase = '1'
lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
lowercase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
lowercase = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
lowercase = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(snake_case_ )
BertModel.from_pretrained(snake_case_ )
BertTokenizer.from_pretrained(snake_case_ )
pipeline(task='fill-mask' , model=snake_case_ )
# baseline - just load from_pretrained with normal network
lowercase = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
lowercase = self.get_env()
lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
lowercase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
lowercase = '\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n '
lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
lowercase = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
lowercase = self.get_env()
lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
lowercase = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase = '1'
lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ):
lowercase = '\nfrom transformers import pipeline\n '
lowercase = '\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n '
lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '
lowercase = self.get_env()
lowercase = '1'
lowercase = [sys.executable, '-c', '\n'.join([load, mock, run] )]
lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def UpperCamelCase_ ( self ):
lowercase = '\nfrom transformers import AutoModel\n '
lowercase = '\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n '
# baseline - just load from_pretrained with normal network
lowercase = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
lowercase = self.get_env()
lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase = '1'
lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series 1, 1/2, 1/3, ..., 1/n as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
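
# Non-interactive usage sketch (added for illustration; bypasses the prompt above):
# harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']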
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case_ ( __lowercase ,unittest.TestCase ):
A_ = DebertaVaTokenizer
A_ = DebertaVaTokenizerFast
A_ = True
A_ = True
def UpperCAmelCase__ ( self : Any )->Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase : Any = DebertaVaTokenizer(_snake_case , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : List[Any] , _snake_case : Union[str, Any] )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : List[str] = """this is a test"""
__lowerCAmelCase : Optional[int] = """this is a test"""
return input_text, output_text
def UpperCAmelCase__ ( self : Tuple )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = """<pad>"""
__lowerCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def UpperCAmelCase__ ( self : Dict )->int:
'''simple docstring'''
__lowerCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(_snake_case ) , 30001 )
def UpperCAmelCase__ ( self : str )->Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCAmelCase__ ( self : str )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase : int = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase : Dict = DebertaVaTokenizer(_snake_case , do_lower_case=_snake_case )
__lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : List[str] = DebertaVaTokenizerFast(_snake_case , do_lower_case=_snake_case )
__lowerCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase__ ( self : Union[str, Any] )->Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase__ ( self : List[Any] )->List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[int] )->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase : Dict = DebertaVaTokenizer(_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : List[Any] = DebertaVaTokenizerFast(_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : List[Any] )->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase : Tuple = DebertaVaTokenizer(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : int = DebertaVaTokenizerFast(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] )->Tuple:
'''simple docstring'''
__lowerCAmelCase : Any = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : Optional[int] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase : str = DebertaVaTokenizer(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : str = DebertaVaTokenizerFast(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : str )->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : Any = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Optional[Any] = DebertaVaTokenizerFast(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : str )->List[str]:
'''simple docstring'''
__lowerCAmelCase : str = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase : Optional[int] = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase : Optional[Any] = DebertaVaTokenizer(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : int = DebertaVaTokenizerFast(_snake_case , do_lower_case=_snake_case , split_by_punct=_snake_case )
__lowerCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : List[str] = self.get_rust_tokenizer()
__lowerCAmelCase : Any = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
__lowerCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Dict = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
__lowerCAmelCase : str = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
__lowerCAmelCase : Any = tokenizer.encode(_snake_case )
__lowerCAmelCase : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = """This is a test"""
__lowerCAmelCase : List[Any] = [13, 1, 4398, 25, 21, 1289]
__lowerCAmelCase : List[Any] = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase : int = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase : Dict = DebertaVaTokenizer(_snake_case , keep_accents=_snake_case )
__lowerCAmelCase : str = DebertaVaTokenizerFast(_snake_case , keep_accents=_snake_case )
__lowerCAmelCase : Optional[Any] = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# fmt: off
__lowerCAmelCase : str = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : Tuple = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
__lowerCAmelCase : Optional[Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase : Optional[int] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase : int = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Tuple = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Optional[int] )->Any:
'''simple docstring'''
__lowerCAmelCase : List[str] = DebertaVaTokenizer(_snake_case )
__lowerCAmelCase : int = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case )
__lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _snake_case , )
@slow
def UpperCAmelCase__ ( self : Any )->Tuple:
        '''simple docstring'''
        # fmt: off
__lowerCAmelCase : Any = {"""input_ids""": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , ) | 232 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''') | 232 | 1 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 104 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase_ :
"""simple docstring"""
def __init__( self : str ,lowercase__ : Tuple ,lowercase__ : Dict=1_3 ,lowercase__ : List[str]=3_0 ,lowercase__ : Tuple=2 ,lowercase__ : Optional[int]=3 ,lowercase__ : List[str]=True ,lowercase__ : Tuple=True ,lowercase__ : int=3_2 ,lowercase__ : List[str]=5 ,lowercase__ : Tuple=4 ,lowercase__ : Any=3_7 ,lowercase__ : Any="gelu" ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : str=1_0 ,lowercase__ : Optional[int]=0.0_2 ,lowercase__ : Union[str, Any]=3 ,lowercase__ : Optional[int]=0.6 ,lowercase__ : List[Any]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = mask_ratio
__lowercase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : List[str] ):
__lowercase = ViTMAEModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[Any] ,lowercase__ : int ,lowercase__ : Optional[Any] ):
__lowercase = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ )
__lowercase = (self.image_size // self.patch_size) ** 2
__lowercase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowercase = 1
__lowercase = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(lowercase__ )
__lowercase = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Dict = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = ViTMAEModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ ,nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
__lowercase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowercase = torch.from_numpy(lowercase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowercase = pt_noise
super().check_pt_tf_models(lowercase__ ,lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs[0].cpu().numpy()
__lowercase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ )
__lowercase = model_class.from_pretrained(lowercase__ )
model.to(lowercase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
# Make sure we don't have nans
__lowercase = after_outputs[0].cpu().numpy()
__lowercase = 0
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase__ ,1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ViTMAEModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self : str ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__lowercase = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowercase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__lowercase = ViTMAEConfig()
__lowercase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase__ ,noise=torch.from_numpy(lowercase__ ).to(device=lowercase__ ) )
# verify the logits
__lowercase = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(lowercase__ ) ,atol=1e-4 ) )
| 104 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 259 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
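# Example invocation (a sketch -- the script file name below is assumed, not taken from
# this repository). With fire.Fire(convert), CLI arguments map directly onto the
# parameters of `convert`; omitting --save_path overwrites the source file in place:
#
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin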
| 259 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def lowerCamelCase (self , __magic_name__ , __magic_name__="" , __magic_name__=None , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer(
prefix + prompt_text , padding=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=self.framework )
snake_case_ : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
snake_case_ : Optional[Any] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
snake_case_ : Dict = generate_kwargs['''max_new_tokens''']
else:
snake_case_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
snake_case_ : Union[str, Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
snake_case_ : Tuple = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
snake_case_ : Optional[Any] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
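# Minimal usage sketch (assumes network access; the "gpt2" checkpoint is an
# illustrative choice, not mandated by this file):
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])
#   # Passing return_full_text=False yields only the newly generated continuation,
#   # i.e. ReturnType.NEW_TEXT instead of the default ReturnType.FULL_TEXT.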
| 279 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
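# Small worked example (toy arrays, purely illustrative):
#
#   import numpy as np
#
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}   (f1 = 2 * (2/3) * 1 / (2/3 + 1))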
| 166 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
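# Usage sketch (assumes the transformers tools runtime; the image path is illustrative):
#
#   from PIL import Image
#
#   captioner = ImageCaptioningTool()
#   caption = captioner(Image.open("photo.jpg"))  # PipelineTool.__call__ chains encode -> forward -> decode
#   print(caption)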
| 224 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 224 | 1 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'{solution() = }')
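# Worked example of the encoding used above: each prime partition of n is stored as the
# product of its primes, so distinct partitions map to distinct set elements.
#
#   partition(5) == {5, 6}   # 5 itself, and 2 + 3 (stored as 2 * 3 = 6)
#   len(partition(10)) == 5  # ten has exactly five prime partitions (Euler's example)
#   solution(number_unique_partitions=4) == 10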
| 321 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job"""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs"""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs"""
    # Lists of the same size must get the same shuffling, so that entangled lists
    # (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
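# Usage sketch (file names are made up for illustration):
#
#   gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt"], "split": "train"}
#   _number_of_shards_in_gen_kwargs(gen_kwargs)       # -> 4
#   _distribute_shards(num_shards=4, max_num_jobs=3)  # -> [range(0, 2), range(2, 3), range(3, 4)]
#   _split_gen_kwargs(gen_kwargs, max_num_jobs=3)
#   # -> [{'files': ['a.txt', 'b.txt'], 'split': 'train'},
#   #     {'files': ['c.txt'], 'split': 'train'},
#   #     {'files': ['d.txt'], 'split': 'train'}]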
return shuffled_kwargs
| 79 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] )-> Dict:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Union[str, Any],**lowercase_ : List[Any] )-> Optional[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname,**lowercase_ ).image_processor
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = [np.random.randint(2_5_5,size=(3, 3_0, 4_0_0),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(lowercase_,0,-1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self : List[Any] )-> Dict:
'''simple docstring'''
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=lowercase_,padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=lowercase_,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,lowercase_ )
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=lowercase_ )
A__ = self.prepare_image_inputs()
A__ = image_processor(lowercase_,return_tensors='np' )
A__ = processor(images=lowercase_,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
@require_torch
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=lowercase_ )
A__ = [torch.ones((1, 3, 5, 5) )]
A__ = [[1_7_6_4, 2_6_4_6]]
A__ = [[6_8_3, 1_0_2_4]]
A__ = processor.post_process_masks(lowercase_,lowercase_,lowercase_ )
self.assertEqual(masks[0].shape,(1, 3, 1_7_6_4, 2_6_4_6) )
A__ = processor.post_process_masks(
lowercase_,torch.tensor(lowercase_ ),torch.tensor(lowercase_ ) )
self.assertEqual(masks[0].shape,(1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(lowercase_,np.array(lowercase_ ),np.array(lowercase_ ) )
self.assertEqual(masks[0].shape,(1, 3, 1_7_6_4, 2_6_4_6) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(lowercase_ ):
A__ = processor.post_process_masks(lowercase_,np.array(lowercase_ ),np.array(lowercase_ ) )
@require_vision
@require_tf
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict )-> Optional[Any]:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Optional[Any],**lowercase_ : Dict )-> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname,**lowercase_ ).image_processor
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
A__ = [np.random.randint(2_5_5,size=(3, 3_0, 4_0_0),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(lowercase_,0,-1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=lowercase_,padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=lowercase_,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,lowercase_ )
def snake_case__ ( self : int )-> List[str]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=lowercase_ )
A__ = self.prepare_image_inputs()
A__ = image_processor(lowercase_,return_tensors='np' )
A__ = processor(images=lowercase_,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
@require_tf
def snake_case__ ( self : Optional[int] )-> Optional[int]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=lowercase_ )
A__ = [tf.ones((1, 3, 5, 5) )]
A__ = [[1_7_6_4, 2_6_4_6]]
A__ = [[6_8_3, 1_0_2_4]]
A__ = processor.post_process_masks(lowercase_,lowercase_,lowercase_,return_tensors='tf' )
self.assertEqual(masks[0].shape,(1, 3, 1_7_6_4, 2_6_4_6) )
A__ = processor.post_process_masks(
lowercase_,tf.convert_to_tensor(lowercase_ ),tf.convert_to_tensor(lowercase_ ),return_tensors='tf',)
self.assertEqual(masks[0].shape,(1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(
lowercase_,np.array(lowercase_ ),np.array(lowercase_ ),return_tensors='tf' )
self.assertEqual(masks[0].shape,(1, 3, 1_7_6_4, 2_6_4_6) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A__ = processor.post_process_masks(
lowercase_,np.array(lowercase_ ),np.array(lowercase_ ),return_tensors='tf' )
@require_vision
@require_torchvision
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Tuple )-> str:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Dict,**lowercase_ : Optional[Any] )-> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname,**lowercase_ ).image_processor
def snake_case__ ( self : Optional[Any] )-> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : List[Any] )-> List[str]:
'''simple docstring'''
A__ = [np.random.randint(2_5_5,size=(3, 3_0, 4_0_0),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(lowercase_,0,-1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def snake_case__ ( self : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=lowercase_ )
A__ = np.random.randint(0,2,size=(1, 3, 5, 5) ).astype(np.floataa )
A__ = [tf.convert_to_tensor(lowercase_ )]
A__ = [torch.tensor(lowercase_ )]
A__ = [[1_7_6_4, 2_6_4_6]]
A__ = [[6_8_3, 1_0_2_4]]
A__ = processor.post_process_masks(
lowercase_,lowercase_,lowercase_,return_tensors='tf' )
A__ = processor.post_process_masks(
lowercase_,lowercase_,lowercase_,return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=lowercase_ )
A__ = self.prepare_image_inputs()
A__ = image_processor(lowercase_,return_tensors='pt' )['pixel_values'].numpy()
A__ = processor(images=lowercase_,return_tensors='pt' )['pixel_values'].numpy()
A__ = image_processor(lowercase_,return_tensors='tf' )['pixel_values'].numpy()
A__ = processor(images=lowercase_,return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase_,lowercase_ ) )
self.assertTrue(np.allclose(lowercase_,lowercase_ ) )
self.assertTrue(np.allclose(lowercase_,lowercase_ ) )
| 282 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
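# Construction sketch (the override values are illustrative):
#
#   config = BartConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#   config.hidden_size           # -> 512, resolved through attribute_map to d_model
#   config.num_attention_heads   # -> 16, resolved to encoder_attention_heads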
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
"""simple docstring"""
@property
def snake_case__ ( self : Dict )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A__ = {0: 'batch'}
A__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A__ = {0: 'batch', 1: 'decoder_sequence'}
A__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A__ , A__ = self.num_layers
for i in range(lowercase_ ):
A__ = {0: 'batch', 2: 'past_sequence + sequence'}
A__ = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def snake_case__ ( self : Optional[Any] )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = super().outputs
else:
A__ = super(lowercase_,self ).outputs
if self.use_past:
A__ , A__ = self.num_layers
for i in range(lowercase_ ):
A__ = {0: 'batch', 2: 'past_sequence + sequence'}
A__ = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def snake_case__ ( self : Tuple,lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]:
'''simple docstring'''
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ )
# Generate decoder inputs
A__ = seq_length if not self.use_past else 1
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ )
A__ = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
A__ = dict(**lowercase_,**lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A__ , A__ = common_inputs['input_ids'].shape
A__ = common_inputs['decoder_input_ids'].shape[1]
A__ , A__ = self.num_attention_heads
A__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A__ = decoder_seq_length + 3
A__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A__ = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowercase_,lowercase_ )],dim=1 )
A__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A__ , A__ = self.num_layers
A__ = min(lowercase_,lowercase_ )
A__ = max(lowercase_,lowercase_ ) - min_num_layers
A__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
A__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowercase_,lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
def snake_case__ ( self : List[str],lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]:
'''simple docstring'''
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A__ , A__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A__ = seqlen + 2
A__ , A__ = self.num_layers
A__ , A__ = self.num_attention_heads
A__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A__ = common_inputs['attention_mask'].dtype
A__ = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowercase_,lowercase_,dtype=lowercase_ )],dim=1 )
A__ = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def snake_case__ ( self : Union[str, Any],lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]:
'''simple docstring'''
A__ = compute_effective_axis_dimension(
lowercase_,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ = tokenizer.num_special_tokens_to_add(lowercase_ )
A__ = compute_effective_axis_dimension(
lowercase_,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
A__ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A__ = dict(tokenizer(lowercase_,return_tensors=lowercase_ ) )
return common_inputs
def snake_case__ ( self : Union[str, Any],lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ )
elif self.task == "causal-lm":
A__ = self._generate_dummy_inputs_for_causal_lm(
lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ )
else:
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ )
return common_inputs
def snake_case__ ( self : int,lowercase_ : Tuple,lowercase_ : int,lowercase_ : int,lowercase_ : str )-> str:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = super()._flatten_past_key_values_(lowercase_,lowercase_,lowercase_,lowercase_ )
else:
A__ = super(lowercase_,self )._flatten_past_key_values_(
lowercase_,lowercase_,lowercase_,lowercase_ )
| 282 | 1 |
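# Editor's sketch of the idea in the entry above: a seq2seq ONNX export declares
# dynamic axes per tensor and pre-allocates dummy past_key_values with
# (batch, heads, length, head_dim) shapes. Names and sizes here are illustrative
# assumptions, not the transformers OnnxConfig API.
import torch

batch, heads, enc_len, dec_len, head_dim, layers = 2, 4, 16, 8, 32, 3
encoder_shape = (batch, heads, enc_len, head_dim)
decoder_shape = (batch, heads, dec_len + 3, head_dim)  # mirrors decoder_past_length above
past_key_values = [
    (
        torch.zeros(decoder_shape),  # decoder self-attention key
        torch.zeros(decoder_shape),  # decoder self-attention value
        torch.zeros(encoder_shape),  # cross-attention key
        torch.zeros(encoder_shape),  # cross-attention value
    )
    for _ in range(layers)
]
dynamic_axes = {"input_ids": {0: "batch", 1: "encoder_sequence"}}
for i in range(layers):
    dynamic_axes[f"past_key_values.{i}.decoder.key"] = {0: "batch", 2: "past_sequence + sequence"}
print(len(past_key_values), past_key_values[0][0].shape, len(dynamic_axes))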
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : Tuple = 1
A_ : Optional[Any] = 3
A_ : List[Any] = (32, 32)
        A_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
        A_ : int = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_SCREAMING_SNAKE_CASE , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _snake_case ( self )->str:
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _snake_case ( self )->List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.dummy_cond_unet_upscale
A_ : Optional[Any] = DDPMScheduler()
A_ : List[str] = DDIMScheduler(prediction_type='''v_prediction''' )
A_ : Optional[int] = self.dummy_vae
A_ : Tuple = self.dummy_text_encoder
A_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A_ : Optional[Any] = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A_ : Optional[int] = StableDiffusionUpscalePipeline(
unet=_SCREAMING_SNAKE_CASE , low_res_scheduler=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , max_noise_level=350 , )
A_ : List[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Tuple = '''A painting of a squirrel eating a burger'''
A_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
A_ : Dict = sd_pipe(
[prompt] , image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
A_ : Union[str, Any] = output.images
A_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
A_ : Union[str, Any] = sd_pipe(
[prompt] , image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=_SCREAMING_SNAKE_CASE , )[0]
A_ : str = image[0, -3:, -3:, -1]
A_ : str = image_from_tuple[0, -3:, -3:, -1]
A_ : Tuple = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
A_ : Any = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : str = self.dummy_cond_unet_upscale
A_ : Optional[Any] = DDPMScheduler()
A_ : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''' )
A_ : str = self.dummy_vae
A_ : str = self.dummy_text_encoder
A_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A_ : Dict = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A_ : List[str] = StableDiffusionUpscalePipeline(
unet=_SCREAMING_SNAKE_CASE , low_res_scheduler=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , max_noise_level=350 , )
A_ : Union[str, Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : int = '''A painting of a squirrel eating a burger'''
A_ : List[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
A_ : List[Any] = output.images
assert image.shape[0] == 2
A_ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
A_ : List[str] = sd_pipe(
[prompt] , image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
A_ : Dict = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : int = self.dummy_cond_unet_upscale
A_ : int = DDPMScheduler()
A_ : Optional[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
A_ : Any = self.dummy_vae
A_ : Optional[Any] = self.dummy_text_encoder
A_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A_ : Tuple = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
A_ : int = unet.half()
A_ : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
A_ : str = StableDiffusionUpscalePipeline(
unet=_SCREAMING_SNAKE_CASE , low_res_scheduler=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , max_noise_level=350 , )
A_ : Union[str, Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : int = '''A painting of a squirrel eating a burger'''
A_ : Tuple = torch.manual_seed(0 )
A_ : Optional[int] = sd_pipe(
[prompt] , image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ).images
A_ : Any = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self )->str:
'''simple docstring'''
A_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
A_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
A_ : Union[str, Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
A_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
A_ : Tuple = '''a cat sitting on a park bench'''
A_ : int = torch.manual_seed(0 )
A_ : str = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
A_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
A_ : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
A_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(
            _SCREAMING_SNAKE_CASE , torch_dtype=torch.float16 , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
A_ : Optional[int] = '''a cat sitting on a park bench'''
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Dict = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _snake_case ( self )->List[Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
A_ : Any = '''stabilityai/stable-diffusion-x4-upscaler'''
A_ : Dict = StableDiffusionUpscalePipeline.from_pretrained(
            _SCREAMING_SNAKE_CASE , torch_dtype=torch.float16 , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ : Union[str, Any] = '''a cat sitting on a park bench'''
A_ : Optional[int] = torch.manual_seed(0 )
A_ : Optional[Any] = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , output_type='''np''' , )
A_ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 186 |
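# Editor's hedged usage sketch of the low-memory path the last test above
# exercises: fp16 weights plus attention slicing and sequential CPU offload.
# Assumes a CUDA GPU, network access, and diffusers/accelerate installed.
import torch
from diffusers import StableDiffusionUpscalePipeline

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
)
pipe.enable_attention_slicing(1)      # smallest slices: lowest peak attention memory
pipe.enable_sequential_cpu_offload()  # stream submodules to the GPU one at a time
# upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]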
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 0 |
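# Editor's usage example for the recursive longest_subsequence above (assumes
# that function is in scope). For this classic input the longest non-decreasing
# subsequence has length 6.
example = [10, 22, 9, 33, 21, 50, 41, 60, 80]
result = longest_subsequence(example)
print(len(result), result)  # expected: 6 [10, 22, 33, 41, 60, 80]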
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError('''number must not be negative''' )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
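# Editor's sketch: why the bit trick above works. A power of two has exactly one
# set bit, so n & (n - 1) clears it and yields zero. Note the function above also
# returns True for 0, which callers may want to special-case.
for n in [1, 2, 3, 4, 6, 8, 16, 31, 32]:
    print(n, bin(n), n & (n - 1) == 0)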
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
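# Editor's cross-check for the closed-form count above: enumerate square laminae
# directly for a small tile budget and compare with solution() from the entry
# above (assumed in scope). Both count hole widths of matching parity.
def laminae_bruteforce(limit: int) -> int:
    count = 0
    for outer in range(3, limit // 4 + 2):
        hole = outer - 2
        while hole >= 1:
            if outer * outer - hole * hole <= limit:
                count += 1
            hole -= 2
    return count

assert laminae_bruteforce(100) == solution(100)
print(laminae_bruteforce(100))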
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self , dl_manager ):
        '''simple docstring'''
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            self.config_name = '''bleurt-base-128'''
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute(self , predictions , references ):
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 279 |
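# Editor's standalone sketch of the checkpoint-name resolution used by the metric
# above: accept either case, fall back to the default, fail loudly otherwise.
# The table here is a truncated stand-in for CHECKPOINT_URLS.
CHECKPOINTS = {"bleurt-base-128": "...", "BLEURT-20": "..."}

def resolve_checkpoint(config_name: str) -> str:
    if config_name == "default":
        config_name = "bleurt-base-128"
    if config_name.lower() in CHECKPOINTS:
        return config_name.lower()
    if config_name.upper() in CHECKPOINTS:
        return config_name.upper()
    raise KeyError(f"{config_name} not found; expected one of {sorted(CHECKPOINTS)}")

print(resolve_checkpoint("BLEURT-base-128"), resolve_checkpoint("bleurt-20"))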
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Any = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
snake_case_ : List[Any] = VideoClassificationPipeline(model=__magic_name__ , image_processor=__magic_name__ , top_k=2 )
snake_case_ : str = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
for example in examples:
snake_case_ : Union[str, Any] = video_classifier(__magic_name__ )
self.assertEqual(
__magic_name__ , [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
] , )
@require_torch
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Any = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
snake_case_ : str = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
snake_case_ : int = pipeline(
'''video-classification''' , model=__magic_name__ , feature_extractor=__magic_name__ , frame_sampling_rate=4 )
snake_case_ : List[str] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
snake_case_ : Union[str, Any] = video_classifier(__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
snake_case_ : int = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
| 279 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox_japanese"""] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 368 |
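# Editor's sketch: the entry above defers heavy imports with transformers'
# _LazyModule. The same effect, in plain Python, uses a module-level
# __getattr__ (PEP 562); this is illustrative, not the transformers internals.
import importlib

_IMPORT_MAP = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module

def __getattr__(name):
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Placed in some_module.py, `from some_module import sqrt` imports math lazily,
# only when the attribute is first requested.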
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = "mobilenet_v2"
    def __init__(self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs(self ):
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})] )
    @property
    def outputs(self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
    @property
    def atol_for_validation(self ):
        '''simple docstring'''
        return 1E-4
| 25 | 0 |
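# Editor's hedged usage sketch for the config entry above: instantiate the real
# transformers MobileNetV2Config and exercise its depth_multiplier guard.
from transformers import MobileNetV2Config

config = MobileNetV2Config(depth_multiplier=1.0)
print(config.model_type, config.image_size)  # mobilenet_v2 224
try:
    MobileNetV2Config(depth_multiplier=0.0)
except ValueError as err:
    print(err)  # depth_multiplier must be greater than zero.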
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 289 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """simple docstring"""
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    """simple docstring"""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = """abc1abc12"""
    text1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text2 = """alskfjaldsk23adsfabcabc"""
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert kmp(pattern, text)
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert kmp(pattern, text)
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert kmp(pattern, text)
    # Test 5)
    pattern = """aabaabaaa"""
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 289 | 1 |
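# Editor's demo for the entry above (assumes kmp/get_failure_array are in scope):
# the failure array tells the matcher how far to fall back instead of rescanning.
print(get_failure_array("aabaabaaa"))   # [0, 1, 0, 1, 2, 3, 4, 5, 2]
print(kmp("ABABX", "ABABZABABYABABX"))  # True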
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    '''simple docstring'''
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    '''simple docstring'''
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    '''simple docstring'''
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 64 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowercase_ ( _lowerCamelCase: str ) -> int:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowerCamelCase : int = model_type_to_module_name(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = importlib.import_module(F""".{module_name}""" , "transformers.models" )
try:
return getattr(_lowerCamelCase , _lowerCamelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_lowerCamelCase , "__name__" , _lowerCamelCase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowerCamelCase : int = importlib.import_module("transformers" )
if hasattr(_lowerCamelCase , _lowerCamelCase ):
return getattr(_lowerCamelCase , _lowerCamelCase )
return None
def lowercase_ ( _lowerCamelCase: Union[str, os.PathLike] , _lowerCamelCase: Optional[Union[str, os.PathLike]] = None , _lowerCamelCase: bool = False , _lowerCamelCase: bool = False , _lowerCamelCase: Optional[Dict[str, str]] = None , _lowerCamelCase: Optional[Union[bool, str]] = None , _lowerCamelCase: Optional[str] = None , _lowerCamelCase: bool = False , **_lowerCamelCase: Tuple , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : List[str] = get_file_from_repo(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(_lowerCamelCase , encoding="utf-8" ) as reader:
return json.load(_lowerCamelCase )
class _snake_case :
def __init__( self : Tuple ):
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase )
def lowerCamelCase__ ( cls : Dict , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ):
__lowerCamelCase : int = kwargs.pop("config" , UpperCAmelCase )
__lowerCamelCase : Dict = kwargs.pop("trust_remote_code" , UpperCAmelCase )
__lowerCamelCase : Any = True
__lowerCamelCase , __lowerCamelCase : str = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Optional[int] = config_dict.get("image_processor_type" , UpperCAmelCase )
__lowerCamelCase : List[Any] = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
__lowerCamelCase : List[str] = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__lowerCamelCase : Dict = config_dict.pop("feature_extractor_type" , UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
__lowerCamelCase : Tuple = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
__lowerCamelCase : Any = config_dict["auto_map"]["AutoFeatureExtractor"]
__lowerCamelCase : Optional[int] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : int = AutoConfig.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# It could be in `config.image_processor_type``
__lowerCamelCase : int = getattr(UpperCAmelCase , "image_processor_type" , UpperCAmelCase )
if hasattr(UpperCAmelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
__lowerCamelCase : Optional[int] = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
__lowerCamelCase : Any = image_processor_class_from_name(UpperCAmelCase )
__lowerCamelCase : str = image_processor_auto_map is not None
__lowerCamelCase : Optional[Any] = image_processor_class is not None or type(UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
__lowerCamelCase : Dict = resolve_trust_remote_code(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if has_remote_code and trust_remote_code:
__lowerCamelCase : Optional[Any] = get_class_from_dynamic_module(
UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : List[Any] = kwargs.pop("code_revision" , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
__lowerCamelCase : Tuple = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase )]
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ):
        IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase , UpperCAmelCase )
| 64 | 1 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : str=99 , lowercase_ : List[Any]=13 , lowercase_ : Optional[Any]=16 , lowercase_ : Any=7 , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=False , lowercase_ : List[str]=True , lowercase_ : List[Any]=2 , lowercase_ : Dict=32 , lowercase_ : List[str]=4 , lowercase_ : Union[str, Any]=4 , lowercase_ : Optional[Any]=30 , lowercase_ : Any=0 , lowercase_ : Tuple=1 , lowercase_ : Any=2 , lowercase_ : Optional[int]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : Dict = batch_size
SCREAMING_SNAKE_CASE_ : Dict = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE_ : str = self.decoder_seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE_ : int = use_attention_mask
SCREAMING_SNAKE_CASE_ : List[Any] = use_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = d_model
SCREAMING_SNAKE_CASE_ : List[str] = d_model
SCREAMING_SNAKE_CASE_ : List[Any] = decoder_layers
SCREAMING_SNAKE_CASE_ : Any = decoder_layers
SCREAMING_SNAKE_CASE_ : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE_ : Dict = decoder_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token_id
SCREAMING_SNAKE_CASE_ : List[str] = bos_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE_ : List[str] = decoder_start_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Optional[int] = decoder_seq_length
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : int = 1
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ : int = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2)
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Any = TrOCRDecoder(config=SCREAMING_SNAKE_CASE_).to(SCREAMING_SNAKE_CASE_).eval()
SCREAMING_SNAKE_CASE_ : int = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_)
SCREAMING_SNAKE_CASE_ : Tuple = model(SCREAMING_SNAKE_CASE_)
SCREAMING_SNAKE_CASE_ : Optional[int] = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_)
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_) == len(SCREAMING_SNAKE_CASE_))
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_) == len(SCREAMING_SNAKE_CASE_) + 1)
SCREAMING_SNAKE_CASE_ : str = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor((2, 1) , config.vocab_size - 1) + 1
# append to next input_ids and
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE_ : str = model(SCREAMING_SNAKE_CASE_)['''last_hidden_state''']
SCREAMING_SNAKE_CASE_ : int = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_)['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE_ : Tuple = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
__UpperCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
__UpperCamelCase = True
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=SCREAMING_SNAKE_CASE_)
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*SCREAMING_SNAKE_CASE_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''') # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
pass
| 91 |
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 | 0 |
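# Editor's worked examples for the abbreviation DP above (assumes abbr() is in
# scope): capitalize some lowercase letters of `a`, delete the remaining
# lowercase letters, and test whether the result equals `b`.
print(abbr("daBcd", "ABC"))  # True: capitalize the first 'a' and 'c', drop both 'd's
print(abbr("dBcd", "ABC"))   # False: there is no 'a' to supply the required 'A'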
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """gpt_neox"""
    def __init__(self , vocab_size=5_04_32 , hidden_size=61_44 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=2_45_76 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=1_00_00 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=20_48 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'The hidden size is not divisble by the number of attention heads! Make sure to update them!' )
    def _rope_scaling_validation(self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 260 |
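# Editor's hedged sketch exercising the rope_scaling validation above with the
# real transformers GPTNeoXConfig (recent versions expose this field).
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)
try:
    GPTNeoXConfig(rope_scaling={"type": "windowed", "factor": 2.0})
except ValueError as err:
    print(err)  # name field must be one of ['linear', 'dynamic']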
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""cls_token""": """<s>"""}
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case : Optional[int] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case : List[str] = {'unk_token': '<unk>'}
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def UpperCamelCase_ ( self : List[str] , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = 'lower newer'
_snake_case : int = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : List[str] = 'lower newer'
_snake_case : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_snake_case : Any = tokenizer.tokenize(UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained('roberta-base' )
_snake_case : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase )
_snake_case : int = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase )
_snake_case : Dict = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : int = 'Encode this sequence.'
_snake_case : str = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_snake_case : int = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_snake_case : List[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
# Testing spaces after special tokens
_snake_case : Dict = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase )} ) # mask token has a left space
_snake_case : int = tokenizer.convert_tokens_to_ids(UpperCamelCase )
_snake_case : List[Any] = 'Encode <mask> sequence'
_snake_case : Any = 'Encode <mask>sequence'
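# With lstrip=True the mask token absorbs the space to its left, so the token that
# follows "<mask> " in the first sentence should begin with the byte-encoded space
# character, while in the second sentence it should not — hence the
# assertEqual / assertNotEqual pair below.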
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase )
_snake_case : str = encoded.index(UpperCamelCase )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Tuple = tokenizer.encode(UpperCamelCase )
_snake_case : Tuple = encoded.index(UpperCamelCase )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Tuple = 'A, <mask> AllenNLP sentence.'
_snake_case : str = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask, while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : List[str] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case : Tuple = f"""{text_of_1_token} {text_of_1_token}"""
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : int = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : str = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Dict = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : str = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Any = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
import collections
import os
import re
from pathlib import Path
a_ = 'src/transformers'
# Matches is_xxx_available()
a_ = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
a_ = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line of the form: if not is_foo_available():
a_ = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
a_ = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
a_ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
a_ = re.compile(r'^\s*try:')
# Catches a line with else:
a_ = re.compile(r'^\s*else:')
def lowerCamelCase__ ( _a):
if _re_test_backend.search(_a) is None:
return None
SCREAMING_SNAKE_CASE : int = [b[0] for b in _re_backend.findall(_a)]
backends.sort()
return "_and_".join(_a)
def lowerCamelCase__ ( _a):
with open(_a , "r" , encoding="utf-8" , newline="\n") as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
SCREAMING_SNAKE_CASE : Any = 0
while line_index < len(_a) and not lines[line_index].startswith("_import_structure = {"):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_a):
return None
# First grab the objects without a specific backend in _import_structure
SCREAMING_SNAKE_CASE : Dict = []
while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
SCREAMING_SNAKE_CASE : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_a):
SCREAMING_SNAKE_CASE : List[Any] = _re_one_line_import_struct.search(_a).groups()[0]
SCREAMING_SNAKE_CASE : Any = re.findall(r"\[([^\]]+)\]" , _a)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", ")])
line_index += 1
continue
SCREAMING_SNAKE_CASE : Tuple = _re_import_struct_key_value.search(_a)
if single_line_import_search is not None:
SCREAMING_SNAKE_CASE : int = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(_a) > 0]
objects.extend(_a)
elif line.startswith(" " * 8 + "\""):
objects.append(line[9:-3])
line_index += 1
SCREAMING_SNAKE_CASE : Any = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING"):
# If the line is an if not is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE : Any = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
SCREAMING_SNAKE_CASE : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(_a) is not None:
objects.append(_re_import_struct_add_one.search(_a).groups()[0])
elif _re_import_struct_add_many.search(_a) is not None:
SCREAMING_SNAKE_CASE : Any = _re_import_struct_add_many.search(_a).groups()[0].split(", ")
SCREAMING_SNAKE_CASE : Optional[int] = [obj[1:-1] for obj in imports if len(_a) > 0]
objects.extend(_a)
elif _re_between_brackets.search(_a) is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = _re_between_brackets.search(_a).groups()[0].split(", ")
SCREAMING_SNAKE_CASE : int = [obj[1:-1] for obj in imports if len(_a) > 0]
objects.extend(_a)
elif _re_quote_object.search(_a) is not None:
objects.append(_re_quote_object.search(_a).groups()[0])
elif line.startswith(" " * 8 + "\""):
objects.append(line[9:-3])
elif line.startswith(" " * 12 + "\""):
objects.append(line[13:-3])
line_index += 1
SCREAMING_SNAKE_CASE : Any = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
SCREAMING_SNAKE_CASE : Any = []
while (
line_index < len(_a)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("else")
):
SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
SCREAMING_SNAKE_CASE : int = _re_import.search(_a)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 8):
objects.append(line[8:-2])
line_index += 1
SCREAMING_SNAKE_CASE : str = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(_a):
# If the line is an if is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE : int = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
SCREAMING_SNAKE_CASE : Optional[Any] = lines[line_index]
SCREAMING_SNAKE_CASE : Dict = _re_import.search(_a)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 12):
objects.append(line[12:-2])
line_index += 1
SCREAMING_SNAKE_CASE : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
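# parse_init returns two dicts keyed by backend name ("none" for unconditional imports):
# the objects registered in `_import_structure` and the ones imported under TYPE_CHECKING.
# The two halves of a lazy __init__.py must expose exactly the same objects.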
def lowerCamelCase__ ( _a , _a):
def find_duplicates(_a):
return [k for k, v in collections.Counter(_a).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
SCREAMING_SNAKE_CASE : Dict = []
for key in import_dict_objects.keys():
SCREAMING_SNAKE_CASE : List[Any] = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
SCREAMING_SNAKE_CASE : Dict = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
SCREAMING_SNAKE_CASE : Dict = "base imports" if key == "none" else f"{key} backend"
errors.append(f"Differences for {name}:")
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
return errors
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : List[Any] = []
for root, _, files in os.walk(_a):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_a , "__init__.py")
SCREAMING_SNAKE_CASE : List[str] = parse_init(_a)
if objects is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_results(*_a)
if len(_a) > 0:
SCREAMING_SNAKE_CASE : List[str] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(_a))
if len(_a) > 0:
raise ValueError("\n\n".join(_a))
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Dict = []
for path, directories, files in os.walk(_a):
for folder in directories:
# Ignore private modules
if folder.startswith("_"):
directories.remove(_a)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_a) / folder).glob("*.py"))) == 0:
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = str((Path(_a) / folder).relative_to(_a))
SCREAMING_SNAKE_CASE : int = short_path.replace(os.path.sep , ".")
submodules.append(_a)
for fname in files:
if fname == "__init__.py":
continue
SCREAMING_SNAKE_CASE : Dict = str((Path(_a) / fname).relative_to(_a))
SCREAMING_SNAKE_CASE : int = short_path.replace(".py" , "").replace(os.path.sep , ".")
if len(submodule.split(".")) == 1:
submodules.append(_a)
return submodules
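# get_transformers_submodules yields dotted module paths (e.g. "models.bert") for every
# package folder and every top-level .py file under src/transformers, skipping private
# modules and empty leftover directories.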
a_ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def lowerCamelCase__ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
SCREAMING_SNAKE_CASE : str = direct_transformers_import(_a)
SCREAMING_SNAKE_CASE : Optional[Any] = set(transformers._import_structure.keys())
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
# (potentially re-)add them.
with open(os.path.join(_a , "__init__.py") , "r") as f:
SCREAMING_SNAKE_CASE : Optional[int] = f.read()
import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" , _a)))
SCREAMING_SNAKE_CASE : str = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_a) > 0:
SCREAMING_SNAKE_CASE : int = "\n".join(f"- {module}" for module in module_not_registered)
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
f"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Dict=13 , UpperCamelCase__: int=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Optional[Any]=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Tuple=10 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: List[Any]=3 , UpperCamelCase__: str=0.6 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : List[str] = mask_ratio
lowerCamelCase__ : Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ : Any = (image_size // patch_size) ** 2
lowerCamelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Any ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel(config=UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : Optional[int] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
# expected sequence length = num_patches
lowerCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : Union[str, Any] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , training=UpperCamelCase__ )
lowerCamelCase__ : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Optional[int] = config_and_inputs
lowerCamelCase__ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : int = TFViTMAEModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : int = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : str = outputs_dict[0].numpy()
lowerCamelCase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowerCamelCase_ ( self: Dict ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase__: int ):
lowerCamelCase__ : Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase__ ):
lowerCamelCase__ : List[str] = v.numpy()
else:
lowerCamelCase__ : Union[str, Any] = np.array(UpperCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = prepare_numpy_arrays(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : Any = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: str ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Optional[int] = tf.constant(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = tf_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCamelCase__ , UpperCamelCase__ ),)
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase__ , """_keras_serializable""" , UpperCamelCase__ )
}
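# This comprehension collects every @keras_serializable *MainLayer class that pairs with
# one of the tested model classes, e.g. the MainLayer behind TFViTMAEModel.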
lowerCamelCase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : List[str] = tf.convert_to_tensor(UpperCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ : List[str] = main_layer_class(UpperCamelCase__ )
lowerCamelCase__ : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ : List[str] = tf.keras.Model(UpperCamelCase__ , outputs=main_layer(UpperCamelCase__ ) )
lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """keras_model.h5""" )
model.save(UpperCamelCase__ )
lowerCamelCase__ : int = tf.keras.models.load_model(
UpperCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase__ , tf.keras.Model )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: str ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : Any = outputs.last_hidden_state.numpy()
lowerCamelCase__ : List[str] = 0
else:
lowerCamelCase__ : int = outputs.logits.numpy()
lowerCamelCase__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : Dict = model_class.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : str = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ : Optional[Any] = 0
else:
lowerCamelCase__ : Union[str, Any] = after_outputs["""logits"""].numpy()
lowerCamelCase__ : List[Any] = 0
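# Compare the output captured before saving with the output after reloading; the max
# absolute difference is asserted below to stay within 1e-5.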
lowerCamelCase__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
def lowerCamelCase_ ( self: Any ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase__ )
lowerCamelCase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ : int = model_class.from_config(model.config )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[Any] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: List[str] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ : Optional[int] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# prepare a noise vector that will also be used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ : Tuple = ViTMAEConfig()
lowerCamelCase__ : Dict = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCamelCase__ : str = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : Any = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : str = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if isinstance(__UpperCamelCase , torch.Tensor ):
return image
elif isinstance(__UpperCamelCase , PIL.Image.Image ):
__SCREAMING_SNAKE_CASE = [image]
if isinstance(image[0] , PIL.Image.Image ):
__SCREAMING_SNAKE_CASE = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
__SCREAMING_SNAKE_CASE = np.concatenate(__UpperCamelCase , axis=0 )
__SCREAMING_SNAKE_CASE = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE = image.transpose(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE = 2.0 * image - 1.0
__SCREAMING_SNAKE_CASE = torch.from_numpy(__UpperCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__SCREAMING_SNAKE_CASE = torch.cat(__UpperCamelCase , dim=0 )
return image
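# preprocess resizes PIL input to (w, h) and maps it to an NCHW float tensor rescaled
# from [0, 255] to [-1, 1], the input range the VAE encoder expects; tensors are passed
# through unchanged (and lists of tensors are concatenated along the batch dimension).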
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=0.9_995 ):
if not isinstance(__UpperCamelCase , np.ndarray ):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = va.device
__SCREAMING_SNAKE_CASE = va.cpu().numpy()
__SCREAMING_SNAKE_CASE = va.cpu().numpy()
__SCREAMING_SNAKE_CASE = np.sum(va * va / (np.linalg.norm(__UpperCamelCase ) * np.linalg.norm(__UpperCamelCase )) )
if np.abs(__UpperCamelCase ) > DOT_THRESHOLD:
__SCREAMING_SNAKE_CASE = (1 - t) * va + t * va
else:
__SCREAMING_SNAKE_CASE = np.arccos(__UpperCamelCase )
__SCREAMING_SNAKE_CASE = np.sin(__UpperCamelCase )
__SCREAMING_SNAKE_CASE = theta_a * t
__SCREAMING_SNAKE_CASE = np.sin(__UpperCamelCase )
__SCREAMING_SNAKE_CASE = np.sin(theta_a - theta_t ) / sin_theta_a
__SCREAMING_SNAKE_CASE = sin_theta_t / sin_theta_a
__SCREAMING_SNAKE_CASE = sa * va + sa * va
if inputs_are_torch:
__SCREAMING_SNAKE_CASE = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
return va
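# slerp interpolates on the sphere: v = sin((1 - t) * theta) / sin(theta) * v0
# + sin(t * theta) / sin(theta) * v1, with theta the angle between v0 and v1. When the
# inputs are nearly parallel (|cos theta| > DOT_THRESHOLD) it falls back to plain linear
# interpolation to avoid dividing by a vanishing sin(theta).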
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = F.normalize(__UpperCamelCase , dim=-1 )
__SCREAMING_SNAKE_CASE = F.normalize(__UpperCamelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
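# For unit vectors the chord length ||x - y|| equals 2 * sin(theta / 2), so
# arcsin(||x - y|| / 2) recovers theta / 2 and the loss above evaluates to
# 2 * (theta / 2)**2 — half the squared angle between the normalized embeddings.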
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
for param in model.parameters():
__SCREAMING_SNAKE_CASE = value
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ):
super().__init__()
self.register_modules(
vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , clip_model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , coca_model=UpperCamelCase__ , coca_tokenizer=UpperCamelCase__ , coca_transform=UpperCamelCase__ , )
__SCREAMING_SNAKE_CASE = (
feature_extractor.size
if isinstance(feature_extractor.size , UpperCamelCase__)
else feature_extractor.size["""shortest_edge"""]
)
__SCREAMING_SNAKE_CASE = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std)
set_requires_grad(self.text_encoder , UpperCamelCase__)
set_requires_grad(self.clip_model , UpperCamelCase__)
def snake_case_ ( self , lowerCAmelCase__ = "auto"):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__)
def snake_case_ ( self):
self.enable_attention_slicing(UpperCamelCase__)
def snake_case_ ( self):
set_requires_grad(self.vae , UpperCamelCase__)
def snake_case_ ( self):
set_requires_grad(self.vae , UpperCamelCase__)
def snake_case_ ( self):
set_requires_grad(self.unet , UpperCamelCase__)
def snake_case_ ( self):
set_requires_grad(self.unet , UpperCamelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength) , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0)
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
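# `strength` controls how much of the diffusion trajectory is rerun for img2img:
# e.g. strength=0.6 with 50 inference steps keeps only the last 30 timesteps, so
# denoising starts from a moderately noised version of the input image.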
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None):
if not isinstance(UpperCamelCase__ , torch.Tensor):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase__)}")
__SCREAMING_SNAKE_CASE = image.to(device=UpperCamelCase__ , dtype=UpperCamelCase__)
if isinstance(UpperCamelCase__ , UpperCamelCase__):
__SCREAMING_SNAKE_CASE = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(UpperCamelCase__)
]
__SCREAMING_SNAKE_CASE = torch.cat(UpperCamelCase__ , dim=0)
else:
__SCREAMING_SNAKE_CASE = self.vae.encode(UpperCamelCase__).latent_dist.sample(UpperCamelCase__)
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__SCREAMING_SNAKE_CASE = 0.1_82_15 * init_latents
__SCREAMING_SNAKE_CASE = init_latents.repeat_interleave(UpperCamelCase__ , dim=0)
__SCREAMING_SNAKE_CASE = randn_tensor(init_latents.shape , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__)
# get latents
__SCREAMING_SNAKE_CASE = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = init_latents
return latents
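# prepare_latents encodes the image with the VAE, applies the 0.18215 latent scaling,
# replicates the latent across the batch, and noises it to the starting timestep so
# that denoising can resume mid-trajectory.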
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.coca_transform(UpperCamelCase__).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
__SCREAMING_SNAKE_CASE = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype))
__SCREAMING_SNAKE_CASE = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split("""<end_of_text>""")[0].replace("""<start_of_text>""" , """""").rstrip(""" .,""")
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.feature_extractor.preprocess(UpperCamelCase__)
__SCREAMING_SNAKE_CASE = torch.from_numpy(clip_image_input["""pixel_values"""][0]).unsqueeze(0).to(self.device).half()
__SCREAMING_SNAKE_CASE = self.clip_model.get_image_features(UpperCamelCase__)
__SCREAMING_SNAKE_CASE = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase__)
__SCREAMING_SNAKE_CASE = image_embeddings_clip.repeat_interleave(UpperCamelCase__ , dim=0)
return image_embeddings_clip
@torch.enable_grad()
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = latents.detach().requires_grad_()
__SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
# predict the noise residual
__SCREAMING_SNAKE_CASE = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
__SCREAMING_SNAKE_CASE = self.scheduler.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE = torch.sqrt(UpperCamelCase__)
__SCREAMING_SNAKE_CASE = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCamelCase__):
__SCREAMING_SNAKE_CASE = self.scheduler.sigmas[index]
__SCREAMING_SNAKE_CASE = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__SCREAMING_SNAKE_CASE = 1 / 0.1_82_15 * sample
__SCREAMING_SNAKE_CASE = self.vae.decode(UpperCamelCase__).sample
__SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1)
__SCREAMING_SNAKE_CASE = transforms.Resize(self.feature_extractor_size)(UpperCamelCase__)
__SCREAMING_SNAKE_CASE = self.normalize(UpperCamelCase__).to(latents.dtype)
__SCREAMING_SNAKE_CASE = self.clip_model.get_image_features(UpperCamelCase__)
__SCREAMING_SNAKE_CASE = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase__)
__SCREAMING_SNAKE_CASE = spherical_dist_loss(UpperCamelCase__ , UpperCamelCase__).mean() * clip_guidance_scale
__SCREAMING_SNAKE_CASE = -torch.autograd.grad(UpperCamelCase__ , UpperCamelCase__)[0]
if isinstance(self.scheduler , UpperCamelCase__):
__SCREAMING_SNAKE_CASE = latents.detach() + grads * (sigma**2)
__SCREAMING_SNAKE_CASE = noise_pred_original
else:
__SCREAMING_SNAKE_CASE = noise_pred_original - torch.sqrt(UpperCamelCase__) * grads
return noise_pred, latents
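# cond_fn implements CLIP guidance: it decodes the current x_0 estimate, embeds it with
# the CLIP image encoder, and nudges the predicted noise by the gradient of the spherical
# distance to the target embedding, scaled by clip_guidance_scale (for LMS-type
# schedulers it adjusts the latents themselves instead).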
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 5_1_2 , lowerCAmelCase__ = 5_1_2 , lowerCAmelCase__ = 0.6 , lowerCAmelCase__ = 5_0 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1_0_0 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = 0.8 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , ):
if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(UpperCamelCase__)} generators.")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if isinstance(UpperCamelCase__ , torch.Generator) and batch_size > 1:
__SCREAMING_SNAKE_CASE = [generator] + [None] * (batch_size - 1)
__SCREAMING_SNAKE_CASE = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
__SCREAMING_SNAKE_CASE = [x[0] for x in coca_is_none if x[1]]
__SCREAMING_SNAKE_CASE = """, """.join(UpperCamelCase__)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCamelCase__):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
__SCREAMING_SNAKE_CASE = self.get_image_description(UpperCamelCase__)
if style_prompt is None:
if len(UpperCamelCase__):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
__SCREAMING_SNAKE_CASE = self.get_image_description(UpperCamelCase__)
# get prompt text embeddings for content and style
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
__SCREAMING_SNAKE_CASE = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
__SCREAMING_SNAKE_CASE = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
__SCREAMING_SNAKE_CASE = slerp(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
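# Blend the content-prompt and style-prompt embeddings on the sphere so a single
# conditioning vector carries both prompts.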
# duplicate text embeddings for each generation per prompt
__SCREAMING_SNAKE_CASE = text_embeddings.repeat_interleave(UpperCamelCase__ , dim=0)
# set timesteps
__SCREAMING_SNAKE_CASE = """offset""" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
__SCREAMING_SNAKE_CASE = {}
if accepts_offset:
__SCREAMING_SNAKE_CASE = 1
self.scheduler.set_timesteps(UpperCamelCase__ , **UpperCamelCase__)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_timesteps(UpperCamelCase__ , UpperCamelCase__ , self.device)
__SCREAMING_SNAKE_CASE = timesteps[:1].repeat(UpperCamelCase__)
# Preprocess image
__SCREAMING_SNAKE_CASE = preprocess(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = self.prepare_latents(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text_embeddings.dtype , self.device , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = preprocess(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = self.prepare_latents(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text_embeddings.dtype , self.device , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = slerp(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
if clip_guidance_scale > 0:
__SCREAMING_SNAKE_CASE = self.get_clip_image_embeddings(UpperCamelCase__ , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = self.get_clip_image_embeddings(UpperCamelCase__ , UpperCamelCase__)
__SCREAMING_SNAKE_CASE = slerp(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE = content_text_input.input_ids.shape[-1]
__SCREAMING_SNAKE_CASE = self.tokenizer([""""""] , padding="""max_length""" , max_length=UpperCamelCase__ , return_tensors="""pt""")
__SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
__SCREAMING_SNAKE_CASE = uncond_embeddings.repeat_interleave(UpperCamelCase__ , dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__SCREAMING_SNAKE_CASE = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device="""cpu""" , dtype=UpperCamelCase__).to(
self.device)
else:
__SCREAMING_SNAKE_CASE = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__SCREAMING_SNAKE_CASE = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__SCREAMING_SNAKE_CASE = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys())
__SCREAMING_SNAKE_CASE = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE = eta
# check if the scheduler accepts generator
__SCREAMING_SNAKE_CASE = """generator""" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
__SCREAMING_SNAKE_CASE = generator
with self.progress_bar(total=UpperCamelCase__):
for i, t in enumerate(UpperCamelCase__):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
# predict the noise residual
__SCREAMING_SNAKE_CASE = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = noise_pred.chunk(2)
__SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__SCREAMING_SNAKE_CASE = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.cond_fn(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__SCREAMING_SNAKE_CASE = 1 / 0.1_82_15 * latents
__SCREAMING_SNAKE_CASE = self.vae.decode(UpperCamelCase__).sample
__SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1)
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCamelCase__)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCamelCase__ , nsfw_content_detected=UpperCamelCase__)
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__magic_name__ = False
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self , lowerCAmelCase__=3_2):
set_seed(0)
__SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=lowerCAmelCase__ , in_channels=3 , out_channels=3)
__SCREAMING_SNAKE_CASE = torch.optim.SGD(model.parameters() , lr=0.00_01)
return model, optimizer
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__SCREAMING_SNAKE_CASE = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
__SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2)).clip(-1 , 1).to(lowerCAmelCase__) for _ in range(4)]
__SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2)).to(lowerCAmelCase__) for _ in range(4)]
__SCREAMING_SNAKE_CASE = [torch.randint(0 , 1_0_0_0 , (4,)).long().to(lowerCAmelCase__) for _ in range(4)]
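# DDPM and DDIM share the same forward (noising) process, so with identical seeds,
# clean images, noise and timesteps, one training step should yield matching noisy
# inputs and model predictions under either scheduler — the allclose checks at the
# end assert exactly that.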
# train with a DDPM scheduler
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2)
model.train().to(lowerCAmelCase__)
for i in range(4):
optimizer.zero_grad()
__SCREAMING_SNAKE_CASE = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , timesteps[i]).sample
__SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2)
model.train().to(lowerCAmelCase__)
for i in range(4):
optimizer.zero_grad()
__SCREAMING_SNAKE_CASE = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , timesteps[i]).sample
__SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5))
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
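# --- Illustration (not part of the configuration file above) ---
# A minimal sketch of using the config: shrink a few of the fields defined
# above to build a small model. RealmEmbedder and the chosen sizes are
# assumptions for illustration, not part of the original file.
from transformers import RealmConfig, RealmEmbedder

config = RealmConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=128, intermediate_size=256, retriever_proj_size=32)
model = RealmEmbedder(config)
print(model.config.retriever_proj_size)  # 32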
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
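# --- Illustration (not part of the tests above) ---
# Minimal end-to-end usage of the pipeline exercised by the tests; the edit
# prompt, step count, and image_guidance_scale are illustration values.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None).to("cuda")
image = load_image("https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
out = pipe("turn him into a cyborg", image=image, num_inference_steps=20, image_guidance_scale=1.5)
out.images[0].save("cyborg.png")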
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
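# --- Illustration (not part of the __init__ above) ---
# What the lazy module buys you: the torch-backed submodules listed in
# _import_structure are only imported when an attribute is first accessed,
# so importing the package stays cheap.
from transformers import ConditionalDetrConfig  # resolved lazily via _LazyModule

config = ConditionalDetrConfig()  # defaults from configuration_conditional_detr
print(config.model_type)  # "conditional_detr"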
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find the smallest difference of a pair of pentagonal
    numbers whose sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"""{solution() = }""")
from sklearn.metrics import recall_score
import datasets
lowerCamelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCamelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
"""
lowerCamelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    r"""Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
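# --- Illustration (not part of the module above) ---
# A minimal sketch of the processor in use. The checkpoint name and image URL
# are assumptions for illustration; image-only inputs yield flattened patches,
# and passing `text` additionally yields decoder features.
import requests
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
print(inputs["flattened_patches"].shape)  # (1, max_patches, patch_dim)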
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
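# --- Illustration (not part of the module above) ---
# A hedged sketch of how these backends are consumed via
# Trainer.hyperparameter_search. The checkpoint name, TrainingArguments, and
# the `train_ds` / `eval_ds` datasets are assumptions for illustration.
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

def model_init():
    return AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

# train_ds / eval_ds: your tokenized datasets (assumed to exist already)
trainer = Trainer(model_init=model_init, args=TrainingArguments(output_dir="hp_search"), train_dataset=train_ds, eval_dataset=eval_ds)
best_run = trainer.hyperparameter_search(direction="minimize", backend="optuna", n_trials=10)
print(best_run.hyperparameters)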
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
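# --- Illustration (not part of the configuration file above) ---
# A minimal sketch: shrink the defaults above to build a small randomly
# initialized model. The chosen sizes are assumptions for illustration.
from transformers import BioGptConfig, BioGptModel

config = BioGptConfig(num_hidden_layers=2, hidden_size=256, num_attention_heads=4, intermediate_size=512, max_position_embeddings=128)
model = BioGptModel(config)
print(sum(p.numel() for p in model.parameters()))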
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        "i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
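# --- Illustration (not part of the tests above) ---
# A minimal generation sketch with the same 90M checkpoints the tests use;
# the input sentence is an arbitrary illustration value.
from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
model = TFBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
reply_ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])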
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common prefix, leftover node prefix, leftover word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Example workflow of the radix tree above."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
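# --- Illustration (not part of the module above) ---
# The prefix-splitting step that insert/find/delete all rely on:
node = RadixNode(prefix="banana")
print(node.match("bandana"))  # ('ban', 'ana', 'dana'): common part, leftover prefix, leftover word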
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # The MODEL_FOR_*_MAPPING names below follow the upstream test suite; the
        # mangled source had collapsed them into identical placeholders.
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
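

# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the test suite): the minimal forward
# pass exercised by the integration test above. It assumes network access to
# the `microsoft/layoutlmv3-base` checkpoint on the Hugging Face Hub and uses
# the public `transformers` class name.
# ---------------------------------------------------------------------------
def _layoutlmv3_forward_pass_demo():
    import torch
    from transformers import LayoutLMv3Model

    model = LayoutLMv3Model.from_pretrained('microsoft/layoutlmv3-base')
    input_ids = torch.tensor([[1, 2]])                   # (batch, text_seq_len)
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token
    outputs = model(input_ids=input_ids, bbox=bbox)      # text-only forward pass
    print(outputs.last_hidden_state.shape)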
| 368 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.')

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)

    # Load weights from the PyTorch checkpoint (downloading it first if a shortcut name was given)
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu')
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'Max absolute difference between models outputs {diff}')
        assert diff <= 2e-2, f'Error, model absolute difference is >2e-2: {diff}'

    # Save the TensorFlow model
    print(f'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format='h5')
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print('=' * 1_0_0)
        print(f' Converting model type {j}/{len(model_types)}: {model_type}')
        print('=' * 1_0_0)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.')

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print('-' * 1_0_0)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f' Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f' Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            print(
                f' Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}')
            print('-' * 1_0_0)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = 'converted_model'

            convert_pt_checkpoint_to_tf(
                model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'), compare_with_pt_model=compare_with_pt_model, )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
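
    # Example invocation (an illustrative addition; the paths are placeholders,
    # not values from the original script): convert a single BERT checkpoint and
    # compare the TF and PT outputs.
    #
    #   python convert_pytorch_checkpoint_to_tf2.py \
    #       --model_type bert \
    #       --pytorch_checkpoint_path ./bert/pytorch_model.bin \
    #       --config_file ./bert/config.json \
    #       --tf_dump_path ./bert-tf \
    #       --compare_with_pt_model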
| 236 | 0 |
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
A: Dict = "RMDJXFUWGISLHVTCQNKYPBEZOA"
A: Optional[int] = "SGLCPQWZHKXAREONTFBVIYJUDM"
A: Optional[int] = "HVSICLTYKQUBXDWAJZOMFGPREN"
A: List[str] = "RZWQHFMVDBKICJLNTUXAGYPSOE"
A: Optional[int] = "LFKIJODBEGAMQPXVUHYSTCZRWN"
A: Optional[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # str.replace returns a new string; the result must be kept

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
A: Dict = "This is my Python script that emulates the Enigma machine from WWII."
A: Union[str, Any] = (1, 1, 1)
A: int = "pictures"
A: List[Any] = (rotora, rotora, rotora)
A: Any = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 109 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase : Optional[int] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
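

# Standalone usage sketch (an addition, not part of the original tests; it
# requires a CUDA GPU and network access to the `shi-labs/versatile-diffusion`
# checkpoint):
#
#   import torch
#   from diffusers import VersatileDiffusionImageVariationPipeline
#   from diffusers.utils import load_image
#
#   pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
#   pipe.to('cuda')
#   image = load_image('https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
#   variation = pipe(image=image, generator=torch.manual_seed(0)).images[0]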
| 47 | 0 |
'''simple docstring'''
from manim import *
class CheckpointScene(Scene):  # class name reconstructed; a manim animation must subclass Scene
    def construct(self):  # manim renders whatever construct() builds
_UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Union[str, Any] = Rectangle(height=0.2_5 , width=0.2_5 )
_UpperCAmelCase : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Tuple = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = Text("CPU" , font_size=24 )
_UpperCAmelCase : List[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Tuple = [mem.copy() for i in range(4 )]
_UpperCAmelCase : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = Text("GPU" , font_size=24 )
_UpperCAmelCase : List[str] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : str = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = Text("Model" , font_size=24 )
_UpperCAmelCase : Optional[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : str = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
model_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : int = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Loaded Checkpoint" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Optional[Any] = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Dict = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
ckpt_arr.append(lowerCamelCase__ )
_UpperCAmelCase : int = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Any = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Any = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
_UpperCAmelCase : str = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Union[str, Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : List[str] = Text("Disk" , font_size=24 )
_UpperCAmelCase : Union[str, Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) , Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : List[str] = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(FadeOut(lowerCamelCase__ ) )
_UpperCAmelCase : Any = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ ) , )
self.wait()
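

# Rendering sketch (an addition, not in the original file): manim renders a
# Scene subclass from the command line, e.g.
#
#   manim -pql this_file.py CheckpointScene
#
# where `-pql` previews the result at low quality; the class name must match
# the Scene subclass defined above.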
| 355 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
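

# Alternative sketch (an addition to the original solution): every third
# Fibonacci number is even, and the even terms satisfy E(k) = 4*E(k-1) + E(k-2)
# (2, 8, 34, 144, ...), which avoids generating the odd terms at all.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    prev, curr = 0, 2  # seed with the first even Fibonacci number
    total = 0
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total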
| 322 | 0 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 (base85) bytes."""
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 (base85) bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
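
# Usage sketch (an addition; the values are illustrative):
#
#   encoded = base85_encode('some text')  # Ascii85-encoded bytes
#   round_trip = base85_decode(encoded)   # == 'some text'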
| 142 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')
        examples = [
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'candidate_labels': ['cat', 'remote', 'couch'],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs, [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ] , )
    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png', candidate_labels=['cat', 'remote', 'couch'], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
        outputs = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], threshold=threshold, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], top_k=top_k, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
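

# Usage sketch (an addition, not part of the test suite; downloads a default
# checkpoint on first use): the calls above boil down to
#
#   from transformers import pipeline
#
#   detector = pipeline('zero-shot-object-detection')
#   detector(
#       'http://images.cocodataset.org/val2017/000000039769.jpg',
#       candidate_labels=['cat', 'remote', 'couch'],
#       threshold=0.2,
#   )
#
# which returns a list of {'score', 'label', 'box'} dicts, one per detection.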
| 142 | 1 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    """Drop top-level keys that have no equivalent in the Transformers model."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download directory is an assumption added here; the
    # call site below does not pass `root` explicitly.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1_024
        ) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_state"], max_source_positions=dimensions["n_audio_ctx"], )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
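
    # Example invocation (an illustrative addition; paths are placeholders, not
    # values from the original file):
    #
    #   python convert_openai_to_hf.py --checkpoint_path tiny.en \
    #       --pytorch_dump_folder_path ./whisper-tiny.en
    #
    # A shortcut name from _MODELS (e.g. "tiny.en") triggers a download; a local
    # ".pt" path is loaded directly.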
| 354 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Return the module logger, creating it lazily on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 2_55
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
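

# Usage sketch (an addition, not part of the vendored module): FileLock guards
# a shared resource across processes; the context manager acquires on enter and
# releases on exit.
#
#   lock = FileLock('/tmp/my_resource.lock', timeout=10)
#   with lock:
#       ...  # critical section; raises Timeout if not acquired within 10s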
| 24 | 0 |
'''simple docstring'''
__lowerCAmelCase = "Input must be a string of 8 numbers plus letter"
__lowerCAmelCase = "TRWAGMYFPDXBNJZSQVHLCKE"
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : int = f"""Expected string as input, found {type(lowerCAmelCase_ ).__name__}"""
raise TypeError(lowerCAmelCase_ )
_a : Any = spanish_id.replace('-' , '' ).upper()
if len(lowerCAmelCase_ ) != 9:
raise ValueError(lowerCAmelCase_ )
try:
_a : List[str] = int(spanish_id_clean[0:8] )
_a : int = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCAmelCase_ ) from ex
if letter.isdigit():
raise ValueError(lowerCAmelCase_ )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
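# Worked example of the checksum: the first 8 digits mod 23 index LOOKUP_LETTERS.
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678Z" is valid.
assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-z")  # dashes are stripped and case is normalized
assert not is_spain_national_id("12345678A")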
| 89 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples: List[str], out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs, ) -> Dict:
    '''simple docstring'''
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now() -> str:
    '''simple docstring'''
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
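# `chunks` is imported from the local `utils` module; a plausible minimal
# implementation (an assumption -- the real helper may differ) matching the
# call chunks(examples, batch_size) above:
def chunks(lst, n):
    """Yield successive n-sized slices of lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]
# Example invocation for summarization (paths illustrative):
# python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
#     --reference_path cnn_dm/test.target --score_path rouge.json --bs 16 --fp16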
| 328 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'CLIPImageProcessor'
SCREAMING_SNAKE_CASE = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __snake_case=None , __snake_case=None , **__snake_case ) -> Optional[int]:
'''simple docstring'''
__a =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __snake_case , )
__a =kwargs.pop('feature_extractor' )
__a =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__snake_case , __snake_case )
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ) -> Tuple:
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__a =self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
__a =self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
__a =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer.model_input_names
__a =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __snake_case , )
return self.image_processor_class
@property
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __snake_case , )
return self.image_processor
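# Usage sketch for the processor above, with a real public checkpoint; text is
# routed through the tokenizer and images through the image processor, and
# pixel_values are attached to the text encoding as implemented in __call__:
import requests
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']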
| 365 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
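# The behaviour exercised above, outside unittest (the checkpoint is public):
from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
print(tok.tokenize("I am a small frog."))  # lowercased, whitespace/BPE-split pieces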
| 308 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''spiece.model'''}
__a = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<sep>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<cls>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=["<eop>", "<eod>"] , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowercase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
lowercase : Union[str, Any] = 3
lowercase : Optional[Any] = do_lower_case
lowercase : List[Any] = remove_space
lowercase : List[str] = keep_accents
lowercase : Tuple = vocab_file
lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
lowercase : Tuple = jieba
lowercase : Optional[Any] = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCamelCase ( self ):
return len(self.sp_model )
def __lowerCamelCase ( self ):
lowercase : int = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowercase : Tuple = self.__dict__.copy()
lowercase : Any = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : int = {}
lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if self.remove_space:
lowercase : int = ''' '''.join(inputs.strip().split() )
else:
lowercase : Any = inputs
lowercase : List[Any] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowercase : str = unicodedata.normalize('''NFKD''' , SCREAMING_SNAKE_CASE__ )
lowercase : str = ''''''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__ )] )
if self.do_lower_case:
lowercase : Union[str, Any] = outputs.lower()
return outputs
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
lowercase : Any = []
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowercase : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase : List[str] = cur_pieces[1:]
else:
lowercase : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(SCREAMING_SNAKE_CASE__ )
else:
new_pieces.append(SCREAMING_SNAKE_CASE__ )
return new_pieces
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = ''''''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ''' ''' ).strip()
return out_string
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = [self.sep_token_id]
lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is not None:
return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1]
return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Union[str, Any] = [self.sep_token_id]
lowercase : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as fi:
lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = super()._decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowercase : str = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
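# The whitespace round-trip used by this tokenizer: before sentencepiece sees
# the (jieba-segmented) text, " " and "\n" are mapped to "\u2582"/"\u2583" via
# the translator built in __init__, and _decode maps them back. Just that step:
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(translator)
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "你好 世界\n"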
| 337 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
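# Example: the arithmetic mean of [3, 6, 9, 12] is 30 / 4 == 7.5.
assert __lowercase([3, 6, 9, 12]) == 7.5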
| 337 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 354 | """simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[torch.FloatTensor] = None
_UpperCamelCase : torch.FloatTensor = None
_UpperCamelCase : Optional[Tuple[torch.FloatTensor]] = None
_UpperCamelCase : Optional[Tuple[torch.FloatTensor]] = None
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__=1 , a__=0 , a__=2 , a__=512 , a__="cls" , a__=False , a__=True , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
_lowerCAmelCase : Optional[Any] = project_dim
_lowerCAmelCase : List[str] = pooler_fn
_lowerCAmelCase : Any = learn_encoder
_lowerCAmelCase : Optional[int] = use_attention_mask
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = [R"pooler", R"logit_scale"]
_UpperCamelCase : List[Any] = [R"position_ids", R"predictions.decoder.bias"]
_UpperCamelCase : List[Any] = "roberta"
_UpperCamelCase : Optional[int] = RobertaSeriesConfig
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : str = XLMRobertaModel(a__ )
_lowerCAmelCase : Optional[Any] = nn.Linear(config.hidden_size , config.project_dim )
_lowerCAmelCase : List[Any] = getattr(a__ , """has_pre_transformation""" , a__ )
if self.has_pre_transformation:
_lowerCAmelCase : List[str] = nn.Linear(config.hidden_size , config.project_dim )
_lowerCAmelCase : Optional[int] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __A ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , ):
_lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Optional[int] = self.base_model(
input_ids=a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_attentions=a__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=a__ , )
if self.has_pre_transformation:
_lowerCAmelCase : Optional[Any] = outputs["""hidden_states"""][-2]
_lowerCAmelCase : Optional[Any] = self.pre_LN(a__ )
_lowerCAmelCase : int = self.transformation_pre(a__ )
return TransformationModelOutput(
projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
_lowerCAmelCase : Union[str, Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
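# A forward-pass sketch in comments only, since the class names are obfuscated
# above; in diffusers this pair corresponds to RobertaSeriesConfig /
# RobertaSeriesModelWithTransformation (assumed names):
#
#     config = RobertaSeriesConfig(project_dim=768, pooler_fn="cls")
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=token_ids, attention_mask=mask)
#     out.projection_state  # shape (batch, seq_len, config.project_dim)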
| 126 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCamelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class UpperCAmelCase ( unittest.TestCase ,A_ ):
def _SCREAMING_SNAKE_CASE (self : Dict ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = load_tool("text-question-answering" )
self.tool.setup()
snake_case : str = load_tool("text-question-answering" , remote=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Tuple = self.tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = self.remote_tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : int ) -> int:
'''simple docstring'''
snake_case : Dict = self.tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : List[Any] = self.remote_tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
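# The same tool outside unittest, mirroring the calls the test makes
# (load_tool and Tool.setup are the transformers agent APIs used above):
from transformers import load_tool

qa_tool = load_tool("text-question-answering")
qa_tool.setup()
context = "Hugging Face launched the BigScience Research Workshop in April 2021."
print(qa_tool(context, "What did Hugging Face do in April 2021?"))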
| 59 |
"""simple docstring"""
def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2, limit):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
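# Worked example: under 14 the longest Collatz chain starts at 9
# (9 -> 28 -> 14 -> 7 -> ... -> 1, twenty terms), and each starting value's
# chain length is cached in `counters` so later chains stop early.
assert solution(14) == 9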
| 136 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=True , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_multiple_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = weight_tying
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = GPTNeoXJapaneseModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = True
__UpperCamelCase = GPTNeoXJapaneseModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = True
__UpperCamelCase = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
__UpperCamelCase = output_from_no_past['hidden_states'][0]
__UpperCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0]
# select random slice
__UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowercase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowercase = (
{"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTNeoXJapaneseModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
__UpperCamelCase = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = 'abeja/gpt-neox-japanese-2.7b'
__UpperCamelCase = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
__UpperCamelCase = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
__UpperCamelCase = GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCAmelCase )
__UpperCamelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCAmelCase )
__UpperCamelCase = []
for prompt in prompts:
__UpperCamelCase = tokenizer(__UpperCAmelCase , return_tensors='pt' ).input_ids
__UpperCamelCase = model.generate(__UpperCAmelCase , max_length=50 )
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 263 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
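# Usage (script filename and paths illustrative). As a fire CLI:
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# or directly from Python, overwriting the source file when save_path is omitted:
#     convert("pytorch_model.bin")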
| 263 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : str = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ = """focalnet"""
def __init__( self , __SCREAMING_SNAKE_CASE=2_24 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=[1_92, 3_84, 7_68, 7_68] , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[3, 3, 3, 3] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1E-4 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
lowercase_ : Tuple = image_size
lowercase_ : Optional[int] = patch_size
lowercase_ : str = num_channels
lowercase_ : Union[str, Any] = embed_dim
lowercase_ : Optional[Any] = use_conv_embed
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Dict = depths
lowercase_ : List[Any] = focal_levels
lowercase_ : Optional[Any] = focal_windows
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Tuple = mlp_ratio
lowercase_ : str = hidden_dropout_prob
lowercase_ : List[Any] = drop_path_rate
lowercase_ : Optional[int] = use_layerscale
lowercase_ : Any = layerscale_value
lowercase_ : Any = use_post_layernorm
lowercase_ : Dict = use_post_layernorm_in_modulation
lowercase_ : List[Any] = normalize_modulator
lowercase_ : Dict = initializer_range
lowercase_ : Tuple = layer_norm_eps
lowercase_ : str = encoder_stride
lowercase_ : List[str] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
lowercase_ , lowercase_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
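# Construction sketch in comments, assuming this is transformers' FocalNetConfig
# (the class name is obfuscated above). With the default depths=[2, 2, 6, 2]
# there are four stages plus the stem, and backbone outputs are selected via
# out_features / out_indices, validated against stage_names:
#
#     config = FocalNetConfig(out_features=["stage2", "stage4"])
#     config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     config.out_indices  # [2, 4]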
| 93 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
UpperCamelCase_ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
a_ = "lm_head"
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
a_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
a_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
else:
a_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
a_ = True
else:
for key, mapped_key in MAPPING.items():
a_ = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(UpperCAmelCase )[0].split("." )[-2]
a_ = mapped_key.replace("*" , UpperCAmelCase )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ = "weight"
else:
a_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True ) ->Tuple:
"""simple docstring"""
if config_path is not None:
a_ = UniSpeechConfig.from_pretrained(UpperCAmelCase )
else:
a_ = UniSpeechConfig()
if is_finetuned:
if dict_path:
a_ = Dictionary.load_from_json(UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a_ = target_dict.pad_index
a_ = target_dict.bos_index
a_ = target_dict.eos_index
a_ = len(target_dict.symbols )
a_ = os.path.join(UpperCAmelCase , "vocab.json" )
if not os.path.isdir(UpperCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = target_dict.indices
# fairseq has the <pad> and <s> switched
a_ = 42
a_ = 43
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
a_ = WavaVecaPhonemeCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCAmelCase , )
a_ = True if config.feat_extract_norm == "layer" else False
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
a_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
a_ = UniSpeechForCTC(UpperCAmelCase )
else:
a_ = UniSpeechForPreTraining(UpperCAmelCase )
if is_finetuned:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
a_ = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
hf_unispeech.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCamelCase_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 243 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _snake_case ( A__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_lowercase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowercase : ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
_lowercase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
_lowercase : str = "question"
_lowercase : str = "context"
_lowercase : str = "answers"
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
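# Usage sketch; in `datasets` this template is exposed as
# QuestionAnsweringExtractive (an assumed mapping, since the class name is
# obfuscated above). Custom column names are remapped to the canonical schema:
from datasets.tasks import QuestionAnsweringExtractive

task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
print(task.column_mapping)  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}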
| 327 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
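# Example invocation (script name and paths illustrative):
#     python convert_m2m100_original_checkpoint_to_pytorch.py model.pt ./m2m100-converted
# after which the dump folder loads through the standard API:
#     from transformers import M2M100ForConditionalGeneration
#     model = M2M100ForConditionalGeneration.from_pretrained("./m2m100-converted")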
| 327 | 1 |
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'SpeechT5FeatureExtractor'
lowerCamelCase__ = 'SpeechT5Tokenizer'
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__(__a, __a)
def __call__( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = kwargs.pop("audio", __a)
_lowerCAmelCase : Dict = kwargs.pop("text", __a)
_lowerCAmelCase : Dict = kwargs.pop("text_target", __a)
_lowerCAmelCase : Union[str, Any] = kwargs.pop("audio_target", __a)
_lowerCAmelCase : Any = kwargs.pop("sampling_rate", __a)
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
if audio is not None:
_lowerCAmelCase : Tuple = self.feature_extractor(__a, *__a, sampling_rate=__a, **__a)
elif text is not None:
_lowerCAmelCase : List[Any] = self.tokenizer(__a, **__a)
else:
_lowerCAmelCase : Dict = None
if audio_target is not None:
_lowerCAmelCase : Union[str, Any] = self.feature_extractor(audio_target=__a, *__a, sampling_rate=__a, **__a)
_lowerCAmelCase : Optional[int] = targets["input_values"]
elif text_target is not None:
_lowerCAmelCase : List[Any] = self.tokenizer(__a, **__a)
_lowerCAmelCase : Union[str, Any] = targets["input_ids"]
else:
_lowerCAmelCase : Union[str, Any] = None
if inputs is None:
return targets
if targets is not None:
_lowerCAmelCase : Any = labels
_lowerCAmelCase : List[Any] = targets.get("attention_mask")
if decoder_attention_mask is not None:
_lowerCAmelCase : Tuple = decoder_attention_mask
return inputs
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : List[str] = kwargs.pop("input_values", __a)
_lowerCAmelCase : int = kwargs.pop("input_ids", __a)
_lowerCAmelCase : List[Any] = kwargs.pop("labels", __a)
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
if input_values is not None:
_lowerCAmelCase : List[str] = self.feature_extractor.pad(__a, *__a, **__a)
elif input_ids is not None:
_lowerCAmelCase : Optional[Any] = self.tokenizer.pad(__a, **__a)
else:
_lowerCAmelCase : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(__a, __a) and "input_ids" in labels[0]):
_lowerCAmelCase : str = self.tokenizer.pad(__a, **__a)
_lowerCAmelCase : str = targets["input_ids"]
else:
_lowerCAmelCase : Union[str, Any] = self.feature_extractor.feature_size
_lowerCAmelCase : str = self.feature_extractor.num_mel_bins
_lowerCAmelCase : str = self.feature_extractor.pad(__a, *__a, **__a)
_lowerCAmelCase : List[Any] = feature_size_hack
_lowerCAmelCase : str = targets["input_values"]
else:
_lowerCAmelCase : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
_lowerCAmelCase : str = labels
_lowerCAmelCase : List[str] = targets.get("attention_mask")
if decoder_attention_mask is not None:
_lowerCAmelCase : Any = decoder_attention_mask
return inputs
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.batch_decode(*__a, **__a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.decode(*__a, **__a)
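# Usage sketch with a real public checkpoint: for TTS, plain text goes through
# the tokenizer branch of __call__ while audio / audio_target would go through
# the feature extractor, as routed above:
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
print(inputs["input_ids"].shape)  # (1, sequence_length)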
| 36 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer']
_lowerCamelCase : Tuple = 'OwlViTImageProcessor'
_lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ):
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
A_ = kwargs.pop("feature_extractor" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )):
A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )]
elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ):
A_ = []
# Maximum number of queries across batch
A_ = max([len(UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase ) != max_num_queries:
A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase ))
A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
encodings.append(UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
if query_images is not None:
A_ = BatchEncoding()
A_ = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values
A_ = query_pixel_values
if images is not None:
A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ):
return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ):
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def __A ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor | 312 | 0 |
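The query-padding step inside the OwlViT processor's __call__ above (padding every sample's text list to the longest list in the batch with " " placeholders) can be isolated; a minimal sketch, as a standalone helper that is not part of the original file:

def pad_text_queries(batch):
    # Pad each sample's list of text queries to the longest list in the batch.
    max_num_queries = max(len(t) for t in batch)
    return [t + [" "] * (max_num_queries - len(t)) for t in batch]

assert pad_text_queries([["a cat"], ["a dog", "a remote"]]) == [["a cat", " "], ["a dog", "a remote"]]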
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is a leap year iff it is divisible by 4, except century years,
    # which must be divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
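A quick sanity check for get_week_day as reconstructed above; 2000-01-01 exercises the divisible-by-400 leap branch:

# Both dates were Saturdays.
assert get_week_day(2020, 10, 24) == 'Saturday'
assert get_week_day(2000, 1, 1) == 'Saturday'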
| 363 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
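The command is normally reached through the accelerate CLI as `accelerate env [--config_file ...]`; it can also be driven programmatically, as a minimal sketch (the config path is hypothetical):

# Driving the env command directly; "config.yaml" is a hypothetical file.
parser = env_command_parser()
args = parser.parse_args(["--config_file", "config.yaml"])
info = env_command(args)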
| 37 | 0 |
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
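The two BFS differences called out in the comments above are easiest to see side by side; a minimal BFS counterpart (pop from the left, mark neighbours as explored on enqueue):

from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored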
| 319 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):  # flag names follow the 4-argument call site below
A: str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the target key names follow the standard ViT layout used by the HF ViLT implementation
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str:
A: Optional[Any] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__lowercase )
A: Tuple = False
A: str = False
A: List[Any] = False
A: Optional[int] = False
if "vqa" in checkpoint_url:
A: Union[str, Any] = True
A: Union[str, Any] = 3_1_2_9
A: List[Any] = '''huggingface/label-files'''
A: Any = '''vqa2-id2label.json'''
A: Optional[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
A: Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()}
A: Any = idalabel
A: Optional[Any] = {v: k for k, v in idalabel.items()}
A: List[str] = ViltForQuestionAnswering(__lowercase )
elif "nlvr" in checkpoint_url:
A: Dict = True
A: str = 2
A: Union[str, Any] = {0: '''False''', 1: '''True'''}
A: Any = {v: k for k, v in config.idalabel.items()}
A: Optional[Any] = 3
A: Any = ViltForImagesAndTextClassification(__lowercase )
elif "irtr" in checkpoint_url:
A: Tuple = True
A: Optional[Any] = ViltForImageAndTextRetrieval(__lowercase )
elif "mlm_itm" in checkpoint_url:
A: Tuple = True
A: Optional[int] = ViltForMaskedLM(__lowercase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
A: int = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict''']
A: List[str] = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase )
if mlm_model or irtr_model:
A: str = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
A , A: Union[str, Any] = model.load_state_dict(__lowercase , strict=__lowercase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowercase )
# Define processor
A: Optional[Any] = ViltImageProcessor(size=3_8_4 )
A: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
A: Optional[int] = ViltProcessor(__lowercase , __lowercase )
# Forward pass on example inputs (image + text)
if nlvr_model:
A: str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
A: List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
A: Any = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: List[str] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
A: Any = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowercase ).raw )
if mlm_model:
A: Optional[int] = '''a bunch of [MASK] laying on a [MASK].'''
else:
A: Optional[int] = '''How many cats are there?'''
A: Union[str, Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: Any = model(**__lowercase )
# Verify outputs
if mlm_model:
A: Any = torch.Size([1, 1_1, 3_0_5_2_2] )
A: Tuple = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 )
# verify masked token prediction equals "cats"
A: List[str] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
A: Any = torch.Size([1, 3_1_2_9] )
A: Optional[int] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
A: Dict = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
A: Union[str, Any] = torch.Size([1, 2] )
A: Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
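The core trick in read_in_q_k_v above is slicing one fused qkv matrix into three equal blocks; in isolation (hidden_size=4 is a hypothetical toy value):

import torch

hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)
q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : 2 * hidden_size, :]
v = qkv_weight[-hidden_size:, :]
# The three slices partition the fused matrix exactly.
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)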
| 319 | 1 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase__ = logging.get_logger(__name__)
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = UNetaDModel
UpperCAmelCase_ : List[str] = 'sample'
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
UpperCAmelCase : Any = 4
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Tuple = (32, 32)
UpperCAmelCase : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
UpperCAmelCase : Dict = torch.tensor([10] ).to(a_ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCAmelCase_ ( self : int ) -> List[Any]:
return (3, 32, 32)
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
return (3, 32, 32)
def UpperCAmelCase_ ( self : Any ) -> str:
UpperCAmelCase : Dict = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
UpperCAmelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
class A_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = UNetaDModel
UpperCAmelCase_ : List[Any] = 'sample'
@property
def UpperCAmelCase_ ( self : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = 4
UpperCAmelCase : Optional[int] = 4
UpperCAmelCase : Optional[Any] = (32, 32)
UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
UpperCAmelCase : List[Any] = torch.tensor([10] ).to(a_ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return (4, 32, 32)
@property
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
return (4, 32, 32)
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
UpperCAmelCase : List[Any] = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
UpperCAmelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : int = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(a_ )
UpperCAmelCase : List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : int = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=a_ )
model.to(a_ )
UpperCAmelCase : List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
UpperCAmelCase , UpperCAmelCase : Dict = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=a_ )
model_accelerate.to(a_ )
model_accelerate.eval()
UpperCAmelCase : Union[str, Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase : Tuple = noise.to(a_ )
UpperCAmelCase : Tuple = torch.tensor([10] * noise.shape[0] ).to(a_ )
UpperCAmelCase : List[str] = model_accelerate(a_ , a_ )['sample']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase , UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=a_ , low_cpu_mem_usage=a_ )
model_normal_load.to(a_ )
model_normal_load.eval()
UpperCAmelCase : Tuple = model_normal_load(a_ , a_ )['sample']
assert torch_all_close(a_ , a_ , rtol=1E-3 )
def UpperCAmelCase_ ( self : Any ) -> List[str]:
UpperCAmelCase : Optional[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(a_ )
UpperCAmelCase : int = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase : Tuple = noise.to(a_ )
UpperCAmelCase : Dict = torch.tensor([10] * noise.shape[0] ).to(a_ )
with torch.no_grad():
UpperCAmelCase : Any = model(a_ , a_ ).sample
UpperCAmelCase : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase : Dict = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(a_ , a_ , rtol=1E-3 ) )
class A_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = UNetaDModel
UpperCAmelCase_ : List[str] = 'sample'
@property
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict=(32, 32) ) -> str:
UpperCAmelCase : List[str] = 4
UpperCAmelCase : Optional[Any] = 3
UpperCAmelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
UpperCAmelCase : int = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=a_ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCAmelCase_ ( self : str ) -> Tuple:
return (3, 32, 32)
@property
def UpperCAmelCase_ ( self : int ) -> int:
return (3, 32, 32)
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
UpperCAmelCase : str = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
UpperCAmelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(a_ )
UpperCAmelCase : Any = self.dummy_input
UpperCAmelCase : Any = floats_tensor((4, 3) + (256, 256) ).to(a_ )
UpperCAmelCase : List[Any] = noise
UpperCAmelCase : Any = model(**a_ )
assert image is not None, "Make sure output is not None"
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(a_ )
UpperCAmelCase : Union[str, Any] = 4
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : Union[str, Any] = (256, 256)
UpperCAmelCase : List[str] = torch.ones((batch_size, num_channels) + sizes ).to(a_ )
UpperCAmelCase : List[Any] = torch.tensor(batch_size * [1E-4] ).to(a_ )
with torch.no_grad():
UpperCAmelCase : Tuple = model(a_ , a_ ).sample
UpperCAmelCase : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase : Tuple = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(a_ , a_ , rtol=1E-2 ) )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
UpperCAmelCase : Any = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(a_ )
UpperCAmelCase : Tuple = 4
UpperCAmelCase : Dict = 3
UpperCAmelCase : Union[str, Any] = (32, 32)
UpperCAmelCase : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(a_ )
UpperCAmelCase : Tuple = torch.tensor(batch_size * [1E-4] ).to(a_ )
with torch.no_grad():
UpperCAmelCase : Any = model(a_ , a_ ).sample
UpperCAmelCase : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase : Any = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(a_ , a_ , rtol=1E-2 ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
pass
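The dummy config used by the first tester above is enough to run a forward pass outside the test harness; a minimal sketch reusing the imports at the top of the file (the mangled UNetaDModel name is kept as imported):

model = UNetaDModel(
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    sample_size=32,
)
noise = torch.randn(4, 3, 32, 32)
out = model(noise, timestep=10).sample
assert out.shape == noise.shape  # this UNet is shape-preserving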
| 359 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ["""pixel_values"""]
def __init__( self : List[Any] , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> None:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = size if size is not None else {'height': 256, 'width': 256}
UpperCAmelCase : List[Any] = get_size_dict(lowercase_ )
UpperCAmelCase : str = np.array(lowercase_ ) if clusters is not None else None
UpperCAmelCase : Any = do_resize
UpperCAmelCase : List[Any] = size
UpperCAmelCase : Any = resample
UpperCAmelCase : Dict = do_normalize
UpperCAmelCase : List[Any] = do_color_quantize
def UpperCAmelCase_ ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ) -> np.ndarray:
UpperCAmelCase : Dict = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
lowercase_ , size=(size['height'], size['width']) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
        # map pixel values from [0, 255] to [-1, 1] (rescale to [0, 2], then subtract 1)
        UpperCAmelCase : int = rescale(image=lowercase_ , scale=1 / 127.5 , data_format=lowercase_ )
        UpperCAmelCase : Dict = image - 1
return image
def UpperCAmelCase_ ( self : str , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowercase_ : List[str] , ) -> PIL.Image.Image:
UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : Optional[Any] = size if size is not None else self.size
UpperCAmelCase : Optional[int] = get_size_dict(lowercase_ )
UpperCAmelCase : Any = resample if resample is not None else self.resample
UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCAmelCase : Optional[int] = clusters if clusters is not None else self.clusters
UpperCAmelCase : List[str] = np.array(lowercase_ )
UpperCAmelCase : int = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase : Dict = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCAmelCase : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_normalize:
UpperCAmelCase : Tuple = [self.normalize(image=lowercase_ ) for image in images]
if do_color_quantize:
UpperCAmelCase : List[str] = [to_channel_dimension_format(lowercase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCAmelCase : int = np.array(lowercase_ )
UpperCAmelCase : str = color_quantize(lowercase_ , lowercase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCAmelCase : Optional[int] = images.shape[0]
UpperCAmelCase : Union[str, Any] = images.reshape(lowercase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCAmelCase : int = list(lowercase_ )
else:
UpperCAmelCase : Dict = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase : Any = {'input_ids': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
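With the helper names restored above, colour quantization is a nearest-cluster lookup; a minimal sketch on random data (the 8-entry palette is hypothetical):

import numpy as np

rng = np.random.default_rng(0)
clusters = rng.integers(0, 256, size=(8, 3)).astype(np.float64)  # hypothetical palette
pixels = rng.integers(0, 256, size=(4, 4, 3)).astype(np.float64)
ids = color_quantize(pixels, clusters)  # one cluster id per pixel
assert ids.shape == (16,) and ids.max() < 8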
| 280 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
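The import modules above follow the same lazy-module recipe; its skeleton, reduced to the essentials (illustrative only: it assumes it lives in a package's __init__.py, and the "modeling"/"MyModel" names are hypothetical):

import sys
from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"modeling": ["MyModel"]}

if TYPE_CHECKING:
    from .modeling import MyModel  # resolved only by static type checkers
else:
    # At runtime the module object is swapped for a proxy that imports submodules lazily.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)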
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : List[str] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
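The `+ 1` shift when building id2label above (making room for the background class at index 0) is the one subtle step; in isolation, with a hypothetical two-class label map:

imagenet_id2label = {"0": "tench", "1": "goldfish"}  # hypothetical stand-in; JSON keys arrive as strings
id2label = {int(k) + 1: v for k, v in imagenet_id2label.items()}
id2label[0] = "background"
assert id2label == {0: "background", 1: "tench", 2: "goldfish"}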
| 355 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
a = BlipProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def lowerCamelCase__ ( self :str , **__magic_name__ :List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = self.prepare_image_inputs()
a = image_processor(__magic_name__ , return_tensors="""np""" )
a = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = processor(text=__magic_name__ )
a = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__magic_name__ )
a = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
a = """lower newer"""
a = self.prepare_image_inputs()
a = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 347 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
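Because the entry point is exposed through fire, positional CLI arguments map directly onto the function parameters; an equivalent programmatic call (the file names are hypothetical):

metrics = calculate_rouge_path("preds.txt", "targets.txt", save_path="metrics.json")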
| 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 | 1 |
'''simple docstring'''
from ....utils import logging
A : Optional[int] = logging.get_logger(__name__)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 227 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A : Any = logging.get_logger(__name__)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : uuid.UUID = None , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None):
if not conversation_id:
_A : str = uuid.uuida()
if past_user_inputs is None:
_A : List[Any] = []
if generated_responses is None:
_A : Union[str, Any] = []
_A : uuid.UUID = conversation_id
_A : List[str] = past_user_inputs
_A : List[str] = generated_responses
_A : Optional[str] = text
def __eq__( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int]):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def A ( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = False):
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".')
_A : Optional[Any] = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_A : Optional[Any] = text
def A ( self : Optional[int]):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_A : Optional[Any] = None
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : str):
self.generated_responses.append(SCREAMING_SNAKE_CASE)
def A ( self : str):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Dict):
_A : Any = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_A : Optional[int] = 'user' if is_user else 'bot'
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
a_ , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : str):
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
if self.tokenizer.pad_token_id is None:
_A : Any = self.tokenizer.eos_token
def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , **SCREAMING_SNAKE_CASE : str):
_A : str = {}
_A : Union[str, Any] = {}
_A : List[str] = {}
if min_length_for_response is not None:
_A : Optional[Any] = min_length_for_response
if minimum_tokens is not None:
_A : Tuple = minimum_tokens
if "max_length" in generate_kwargs:
_A : List[Any] = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_A : List[str] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(SCREAMING_SNAKE_CASE)
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , SCREAMING_SNAKE_CASE : Union[Conversation, List[Conversation]] , SCREAMING_SNAKE_CASE : Any=0 , **SCREAMING_SNAKE_CASE : Tuple):
_A : Any = super().__call__(SCREAMING_SNAKE_CASE , num_workers=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and len(SCREAMING_SNAKE_CASE) == 1:
return outputs[0]
return outputs
def A ( self : Any , SCREAMING_SNAKE_CASE : Conversation , SCREAMING_SNAKE_CASE : Union[str, Any]=32):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
raise ValueError('ConversationalPipeline, expects Conversation as inputs')
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method')
if hasattr(self.tokenizer , '_build_conversation_input_ids'):
_A : Optional[Any] = self.tokenizer._build_conversation_input_ids(SCREAMING_SNAKE_CASE)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_A : Dict = self._legacy_parse_and_tokenize(SCREAMING_SNAKE_CASE)
if self.framework == "pt":
_A : Union[str, Any] = torch.LongTensor([input_ids])
elif self.framework == "tf":
_A : Optional[Any] = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str]=10 , **SCREAMING_SNAKE_CASE : Tuple):
_A : str = generate_kwargs.get('max_length' , self.model.config.max_length)
_A : Any = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
_A : Dict = max_length - minimum_tokens
_A : int = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
_A : str = model_inputs['attention_mask'][:, -trim:]
_A : Any = model_inputs.pop('conversation')
_A : Optional[Any] = max_length
_A : int = self.model.generate(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
if self.model.config.is_encoder_decoder:
_A : int = 1
else:
_A : List[Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def A ( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any]=True):
_A : Optional[Any] = model_outputs['output_ids']
_A : Optional[int] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE , )
_A : Any = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(SCREAMING_SNAKE_CASE)
return conversation
def A ( self : str , SCREAMING_SNAKE_CASE : Conversation):
_A : Optional[Any] = self.tokenizer.eos_token_id
_A : List[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE))
if len(SCREAMING_SNAKE_CASE) > self.tokenizer.model_max_length:
_A : Dict = input_ids[-self.tokenizer.model_max_length :]
return input_ids
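The Conversation container above can be exercised on its own, without a model; a minimal round trip:

conv = Conversation("Hi there!")
conv.mark_processed()             # moves the new input into past_user_inputs
conv.append_response("Hello!")    # records the bot reply
conv.add_user_input("How are you?")
print(conv)                       # user >> Hi there! / bot >> Hello! / user >> How are you?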
| 227 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''yjernite/retribert-base-uncased''': 512,
}
lowerCamelCase = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class _a ( _lowercase):
_a : List[Any] = VOCAB_FILES_NAMES
_a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_a : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : List[Any] = PRETRAINED_INIT_CONFIGURATION
_a : Optional[Any] = RetriBertTokenizer
_a : Optional[int] = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        # Rebuild the backend normalizer if its saved state disagrees with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
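    # Sanity-check sketch (an assumption: requires network access to the checkpoint
    # listed in PRETRAINED_VOCAB_FILES_MAP above):
    #
    #   tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    #   print(tok("passage retrieval with BERT").input_ids)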
| 131 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
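# Note: conv_out above is zero-initialized on purpose. ControlNet starts out as an
# identity-preserving branch: with zero weights, the conditioning embedding (and the
# per-resolution control residuals below) contribute nothing at step 0, so training
# begins from the behavior of the frozen base UNet.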
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
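# Minimal initialization sketch (a sketch only; the dummy shapes follow init_weights
# above rather than real latents):
#
#   import jax
#   model = FlaxControlNetModel()
#   params = model.init_weights(jax.random.PRNGKey(0))
#   # sample: (1, 4, 32, 32), timesteps: (1,), text states: (1, 1, 1280),
#   # conditioning image: (1, 3, 256, 256) -- 8x the latent resolution.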
| 159 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 54 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 54 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 331 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
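# scale/unscale are exact inverses: scale whitens CLIP image embeddings with the
# configured per-dimension mean/std before noise is added, and unscale maps the
# denoised embeddings back. A quick round-trip check (sketch):
#
#   normalizer = StableUnCLIPImageNormalizer()
#   x = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-5)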
| 261 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 351 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
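# Note on the check above: `loss` is the mean cross-entropy per label token, so
# multiplying by `labels.shape[-1]` recovers the summed negative log-likelihood of
# the target sequence; the sign flip turns it into a log-likelihood "score" compared
# against a reference value exported from the original Mesh-TensorFlow (mtf) model.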
| 90 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, [eos, src_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, [eos, tgt_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
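# Translation usage sketch (hedged: assumes the en->ro checkpoint above is available
# and that the generation loop honors `forced_bos_token_id`):
#
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # the decoder output is forced to start with the target language code:
#   # model.generate(**batch, forced_bos_token_id=tok.lang_code_to_id["ro_RO"])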
| 11 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    # f = 1 / (2 * pi * sqrt(L * C)) for an LC circuit
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
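# Worked example: a 10 mH inductor with a 5 uF capacitor gives
# 1 / (2 * pi * sqrt(10e-3 * 5e-6)) ~= 711.76 Hz:
#
#   resonant_frequency(10e-3, 5e-6)  # -> ('Resonant frequency', 711.76...)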
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 8 | 0 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 356 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 336 | 0 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, else PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
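# Usage sketch for the feature above (assumes `datasets` and Pillow are installed;
# the file names are placeholders):
#
#   from datasets import Dataset, Features
#   features = Features({"image": Image()})
#   ds = Dataset.from_dict({"image": ["cat.png", "dog.png"]}, features=features)
#   ds[0]["image"]  # decoded lazily into a PIL.Image.Image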
| 171 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 171 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using secure choices."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password generated with the required characters included; the remaining
    # length is split roughly evenly between letters, digits and special characters.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
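# Example: is_strong_password("Aa1!aaaa") -> True (length 8 with at least one
# uppercase, lowercase, digit and punctuation character), while
# is_strong_password("password") -> False.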
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
| 370 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
UpperCamelCase_ = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
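# PhoBERT checkpoints accept at most 256 input positions, hence the sizes above.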
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, A, A, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", **A, ):
'''simple docstring'''
super().__init__(
bos_token=A, eos_token=A, unk_token=A, sep_token=A, cls_token=A, pad_token=A, mask_token=A, **A, )
SCREAMING_SNAKE_CASE : Tuple = vocab_file
SCREAMING_SNAKE_CASE : str = merges_file
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Dict = 3
self.add_from_file(A )
SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(A, encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE : int = merges_handle.read().split('\n' )[:-1]
SCREAMING_SNAKE_CASE : List[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE : Tuple = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : List[Any] = {}
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
SCREAMING_SNAKE_CASE : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self, A, A = None, A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A, token_ids_a=A, already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : Any = tuple(A )
SCREAMING_SNAKE_CASE : List[Any] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
SCREAMING_SNAKE_CASE : Optional[Any] = get_pairs(A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : int = min(A, key=lambda A : self.bpe_ranks.get(A, float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = bigram
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : Optional[Any] = 0
while i < len(A ):
try:
SCREAMING_SNAKE_CASE : str = word.index(A, A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : List[str] = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : List[str] = tuple(A )
SCREAMING_SNAKE_CASE : Any = new_word
if len(A ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Optional[Any] = get_pairs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = '@@ '.join(A )
SCREAMING_SNAKE_CASE : Optional[int] = word[:-4]
SCREAMING_SNAKE_CASE : Any = word
return word
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Tuple = re.findall(r'\S+\n?', A )
for token in words:
split_tokens.extend(list(self.bpe(A ).split(' ' ) ) )
return split_tokens
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.encoder.get(A, self.encoder.get(self.unk_token ) )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.decoder.get(A, self.unk_token )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ' '.join(A ).replace('@@ ', '' ).strip()
return out_string
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(
A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file, A )
if os.path.abspath(self.merges_file ) != os.path.abspath(A ):
copyfile(self.merges_file, A )
return out_vocab_file, out_merge_file
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if isinstance(A, A ):
try:
with open(A, 'r', encoding='utf-8' ) as fd:
self.add_from_file(A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
SCREAMING_SNAKE_CASE : int = f.readlines()
for lineTmp in lines:
SCREAMING_SNAKE_CASE : List[str] = lineTmp.strip()
SCREAMING_SNAKE_CASE : Optional[Any] = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
SCREAMING_SNAKE_CASE : Optional[int] = line[:idx]
SCREAMING_SNAKE_CASE : Optional[Any] = len(self.encoder )
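# Illustrative usage (assumes local PhoBERT vocab/merges files are available):
#   tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
#   tokenizer.tokenize("Tôi là sinh_viên")  # -> BPE pieces, continuations marked "@@"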
| 246 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
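# The tester below builds a deliberately tiny CTRL config (vocab 99, hidden
# size 32, 5 layers) so the unit tests run quickly on CPU.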
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 75 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
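# Worked example: electric_conductivity(conductivity=25, electron_conc=100,
# mobility=0) solves for the zero-valued quantity and returns
# ("mobility", 25 / (100 * ELECTRON_CHARGE)), i.e. roughly ("mobility", 1.56e18).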
| 154 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
UpperCAmelCase_ : Dict = tuple[int, int]
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = vertices
SCREAMING_SNAKE_CASE_ : List[str] = {
(min(__lowercase), max(__lowercase)): weight for edge, weight in edges.items()
}
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : List[Any]):
'''simple docstring'''
self.vertices.add(edge[0])
self.vertices.add(edge[1])
SCREAMING_SNAKE_CASE_ : List[str] = weight
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = Graph({min(self.vertices)} , {})
SCREAMING_SNAKE_CASE_ : int = 42
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 42
SCREAMING_SNAKE_CASE_ : List[str] = 42
SCREAMING_SNAKE_CASE_ : Any = 42
while len(subgraph.vertices) < len(self.vertices):
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(self.edges.values()) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
SCREAMING_SNAKE_CASE_ : Tuple = edge
SCREAMING_SNAKE_CASE_ : Optional[Any] = weight
subgraph.add_edge(__lowercase , __lowercase)
return subgraph
def _A (__a = "p107_network.txt" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.abspath(os.path.dirname(__a ) )
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(__a , __a )
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : Dict = 42
SCREAMING_SNAKE_CASE_ : int = 42
SCREAMING_SNAKE_CASE_ : Optional[int] = 42
with open(__a ) as f:
SCREAMING_SNAKE_CASE_ : Dict = f.read().strip().split('''\n''' )
SCREAMING_SNAKE_CASE_ : int = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(__a ) ):
for edgea in range(__a ):
if adjaceny_matrix[edgea][edgea] != "-":
SCREAMING_SNAKE_CASE_ : Tuple = int(adjaceny_matrix[edgea][edgea] )
SCREAMING_SNAKE_CASE_ : str = Graph(set(range(len(__a ) ) ) , __a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = graph.prims_algorithm()
SCREAMING_SNAKE_CASE_ : List[str] = sum(graph.edges.values() )
SCREAMING_SNAKE_CASE_ : Dict = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
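# The saving equals the total weight of all edges minus the weight of the
# minimum spanning tree that Prim's algorithm returns.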
| 359 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
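# The positional embeddings are expected to be missing from the converted state
# dict: MusicGen uses deterministic sinusoidal embeddings that are re-created at
# load time rather than stored as weights.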
def rename_keys(name):
    """Map a fairseq MusicGen parameter name to its Hugging Face equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict and split off the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
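# Illustrative invocation (the output path is a placeholder):
#   python <this script> --checkpoint small --pytorch_dump_folder ./musicgen-small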
| 318 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 70 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Check that both sides of an init define the same objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one is inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that every submodule on disk is registered in the main `_import_structure`."""
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
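# At import time only the lightweight `_import_structure` dict is built; the
# heavy submodules are loaded lazily by `_LazyModule` on first attribute access.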
| 124 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
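# With the toy merges above, "react" has no whole-word merge path, so BPE falls
# back to the learned pieces "re@@ a@@ c@@ t", while "adapt" and "apt" are
# reassembled via the "ad apt</w>" and "ap t</w>" merge rules.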
| 124 | 1 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    # Read the message down each of `key` columns: column c collects every
    # key-th character starting at index c.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
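# Worked example: encrypt_message(6, "Harshil Darji") writes the message down
# 6 columns, giving "Hlia rDsahrij"; decrypt_message(6, ...) inverts it.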
| 274 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
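# The helpers below (adapted from OpenAI's human-eval) run untrusted generated
# code in a separate process, swallow its I/O, enforce a wall-clock limit, and
# stub out destructive OS calls before execution.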
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluate one completion by running it in its own process with a timeout."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). WARNING: this function is NOT a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 274 | 1 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Heron's formula: area from the three side lengths."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
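# Heron's formula check: area_triangle_three_sides(5, 12, 13) gives
# s = 15 and sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.0.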
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print("""\nSurface Areas of various geometric shapes: \n""")
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""") | 361 |
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
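    # For n=4, k=2 this prints the six combinations: 1 2, 1 3, 1 4, 2 3, 2 4, 3 4.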
print_all_state(total_list) | 307 | 0 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily make change for `value` using the given denominations."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the denomination to the answer

    return answer
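# Example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].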
# Driver Code
if __name__ == "__main__":
_A = []
_A = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
_A = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
_A = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
_A = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
_A = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f"""Following is minimal change for {value}: """)
_A = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 231 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
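# Synthetic regression fixtures used by accelerate's example tests: the dataset
# yields noisy samples of y = a * x + b that the tiny models below can fit.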
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
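
# Minimal sketch (added for illustration, not part of the original test
# utilities): one forward pass of RegressionModel over a batch drawn from
# RegressionDataset.
if __name__ == "__main__":
    dataset = RegressionDataset(length=8, seed=42)
    loader = DataLoader(dataset, batch_size=4)
    model = RegressionModel(a=2, b=3)
    batch = next(iter(loader))
    print(model(batch["x"]))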
| 231 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class BasicImageProcessor(BaseImageProcessor):
    # NOTE: the original class identifier was mangled in the source, so this
    # name is a deobfuscated stand-in rather than a specific model class.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, do_normalize: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_center_crop: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
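
# Usage sketch (added; the class name above is a deobfuscated stand-in, so
# treat this as a hypothetical example rather than a documented API):
if __name__ == "__main__":
    from PIL import Image

    processor = BasicImageProcessor()
    image = Image.new("RGB", (256, 256))
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)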
| 289 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase__ :
"""simple docstring"""
pass
| 289 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
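
# Usage sketch (added; downloads the checkpoint, so network access is assumed):
if __name__ == "__main__":
    tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    encoded = tokenizer("Paris is the capital of France.")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))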
| 64 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """Recursive Heap's algorithm: return all permutations of a list as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
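
# Worked example (added): on three elements the routine emits all 3! = 6
# permutations, with consecutive outputs differing by a single swap.
assert heaps([1, 2, 3]) == [
    (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1),
]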
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 64 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
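
# Usage sketch (added): build a composite config from two sub-model configs.
if __name__ == "__main__":
    from transformers import BertConfig

    config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
    print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True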
| 120 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir formula for whichever of force, area or distance is 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
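
# Worked example (added for illustration): with force=0 the helper solves
# F = (ħ c π² A) / (240 d⁴); 4 cm² plates one micrometre apart give ~5.2e-7 N.
if __name__ == "__main__":
    print(casimir_force(force=0, area=4e-4, distance=1e-6))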
| 120 | 1 |
'''simple docstring'''
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from the Giphy search API for a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 164 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 164 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
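
# Usage sketch (added; the checkpoint is downloaded on first use):
if __name__ == "__main__":
    tool = TextSummarizationTool()
    print(tool("A long meeting transcript to condense. " * 20))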
| 245 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
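
# Illustration (added): with the lazy-module pattern above, heavyweight
# submodules are only imported on first attribute access, e.g.
# from transformers.models.plbart import PLBartConfig  # triggers the real import
# config = PLBartConfig()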
| 245 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
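
# Usage sketch (added): build a small config and inspect the dynamic ONNX axes.
if __name__ == "__main__":
    config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    onnx_config = BertOnnxConfig(config)
    print(dict(onnx_config.inputs))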
| 185 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # NOTE: intentionally shadows the builtin `map` inside this benchmark script.
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    # NOTE: intentionally shadows the builtin `filter` inside this benchmark script.
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 287 | 0 |
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
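
# Examples (added): the pattern accepts 0, 94, +94 and 0094 prefixes before a
# 7x mobile code, an optional "-" or " " separator, then seven digits.
# is_sri_lankan_phone_number("+94773283048")  -> True
# is_sri_lankan_phone_number("0718382399")    -> True
# is_sri_lankan_phone_number("075HELLO")      -> False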
| 355 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49 | 0 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level)

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main() | 191 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 191 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 370 |
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
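
# Usage sketch (added; model, checkpoint and variable names here are
# illustrative assumptions, not part of this module):
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# outputs = model(pixel_values)  # an MgpstrForSceneTextRecognition forward pass
# print(processor.batch_decode(outputs.logits)["generated_text"])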
| 84 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) | 81 |
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
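
# Illustration (added): pipeline test suites consume these sets to decide which
# call arguments to exercise; a pipeline without negative prompting might use
# TEXT_TO_IMAGE_PARAMS - {"negative_prompt"} as its parameter set.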
| 244 | 0 |
'''simple docstring'''
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Tarjan's algorithm: strongly connected components of a directed graph."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
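    # Added sanity check (illustrative, not in the original): an edgeless
    # graph yields one singleton component per vertex.
    assert tarjan(create_graph(3, [])) == [[0], [1], [2]]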
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 364 |
"""Close stale GitHub issues automatically, unless they carry an exempt label."""
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 283 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__UpperCAmelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
def _snake_case ( lowercase__ : str , lowercase__ : int=1_0_0 , lowercase__ : int=" " ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = text.split(lowercase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowercase__ ) , lowercase__ )]
def _snake_case ( lowercase__ : dict ) -> dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(lowercase__ ):
titles.append(title if title is not None else """""" )
texts.append(lowercase__ )
return {"title": titles, "text": texts}
def _snake_case ( lowercase__ : dict , lowercase__ : DPRContextEncoder , lowercase__ : DPRContextEncoderTokenizerFast ) -> dict:
'''simple docstring'''
lowerCAmelCase_ :Tuple = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=lowercase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
lowerCAmelCase_ :int = ctx_encoder(input_ids.to(device=lowercase__ ) , return_dict=lowercase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _snake_case ( lowercase__ : "RagExampleArguments" , lowercase__ : "ProcessingArguments" , lowercase__ : "IndexHnswArguments" , ) -> Optional[Any]:
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase_ :Tuple = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase_ :str = dataset.map(lowercase__ , batched=lowercase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase_ :Optional[int] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowercase__ )
lowerCAmelCase_ :List[Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase_ :str = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase_ :str = dataset.map(
partial(lowercase__ , ctx_encoder=lowercase__ , ctx_tokenizer=lowercase__ ) , batched=lowercase__ , batch_size=processing_args.batch_size , features=lowercase__ , )
# And finally save your dataset
lowerCAmelCase_ :Dict = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(lowercase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase_ :Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=lowercase__ )
# And save the index
lowerCAmelCase_ :Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(lowercase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :str = field(
default=str(Path(A__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
UpperCAmelCase_ :str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
UpperCAmelCase_ :str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
UpperCAmelCase_ :Optional[str] = field(
default=str(Path(A__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
UpperCAmelCase_ :int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
UpperCAmelCase_ :int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__UpperCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
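    # Example invocation (added, illustrative; the script name and paths are
    # placeholders for this sketch):
    #   python use_own_knowledge_dataset.py \
    #       --csv_path path/to/my_knowledge_dataset.csv \
    #       --output_dir path/to/my_knowledge_dataset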
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
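# Hedged usage sketch (added; not part of the original module — the first call
# downloads the bart-large-mnli checkpoint):
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", ["positive", "negative"])  # -> "positive"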
| 43 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0_0_0 ) -> Tuple:
return sum(e for e in range(3 , __a ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
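# Cross-check (added note): by inclusion-exclusion, with T(k) = k*(k+1)//2,
# solution() == 3*T(333) + 5*T(199) - 15*T(66) == 233168.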
| 371 | """simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
args = parser.parse_args()
pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
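    # Example invocation (added, illustrative; file names are placeholders):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./model.ckpt --dump_path ./converted --extract_ema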
| 2 | 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__A : List[Any] = True
except (ImportError, ModuleNotFoundError):
__A : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def lowercase ( __snake_case : str ):
re.sub('''<n>''' , '''''' , __snake_case ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__snake_case ) )
| 33 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files


FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_file(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
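# Added note (illustrative, not in the original file): pytest resolves these
# fixtures by name, so a test can simply request one and the whole dependency
# chain is built on demand, e.g.
#
#   def test_zip_has_two_members(zip_csv_path):
#       with zipfile.ZipFile(zip_csv_path) as zf:
#           assert len(zf.namelist()) == 2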
| 173 | 0 |
"""Project Euler 123: prime square remainders."""
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incrementally yield prime numbers (infinite generator)."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
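# Worked check (added): for odd n, (p_n - 1)**n + (p_n + 1)**n ≡ 2*n*p_n (mod p_n**2);
# e.g. n = 3, p_3 = 5: 4**3 + 6**3 = 280 ≡ 5 ≡ 2*3*5 (mod 25).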
| 129 |
"""Zero-shot audio classification pipeline."""
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
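# Hedged usage sketch (added; the model id is illustrative, not prescribed by
# this file):
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])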
| 129 | 1 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
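# Added usage sketch (illustrative, not in the original file):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)  # enqueue returns self, so calls chain
    assert len(queue) == 3 and queue.first() == 1
    assert queue.dequeue() == 1  # front element leaves first (FIFO)
    queue.enqueue(4)  # the rear index has wrapped around to the freed slot
    assert queue.dequeue() == 2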
| 90 |
| 90 | 1 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
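# Worked check (added): 600851475143 = 71 * 839 * 1471 * 6857,
# so solution() returns 6857.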
| 118 |
def partition(m: int) -> int:
    """Count the integer partitions of ``m`` with bottom-up dynamic programming."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
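# Worked check (added): partition(m) returns the partition count p(m),
# e.g. partition(5) == 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.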
| 118 | 1 |
# Lint as: python3
"""Utilities for dataset file names."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
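# Added usage sketch (illustrative, not in the original module):
if __name__ == "__main__":
    assert camelcase_to_snakecase("SquadV2Dataset") == "squad_v2_dataset"
    assert filename_prefix_for_split("MyDataset", "train") == "my_dataset-train"
    # Two shards of an arrow dataset produce numbered file names:
    print(filenames_for_dataset_split("/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]))
    # -> ['/data/my_dataset-train-00000-of-00002.arrow', '/data/my_dataset-train-00001-of-00002.arrow']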
| 12 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """nat"""
_lowerCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __lowerCamelCase=4 , __lowerCamelCase=3 , __lowerCamelCase=64 , __lowerCamelCase=[3, 4, 6, 5] , __lowerCamelCase=[2, 4, 8, 16] , __lowerCamelCase=7 , __lowerCamelCase=3.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0 , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
__A : Union[str, Any] = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = embed_dim
__A : Dict = depths
__A : str = len(__lowerCamelCase )
__A : Optional[Any] = num_heads
__A : str = kernel_size
__A : Any = mlp_ratio
__A : Optional[int] = qkv_bias
__A : str = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : int = drop_path_rate
__A : int = hidden_act
__A : Any = layer_norm_eps
__A : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__A : int = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
__A : Union[str, Any] = layer_scale_init_value
__A : List[str] = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
__A , __A : Any = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
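# Added usage sketch: with the defaults above (embed_dim=64, four stages), the
# derived channel dimension after the last stage is 64 * 2**3 == 512.
#   config = NatConfig()
#   assert config.hidden_size == 512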
| 179 | 0 |
"""PyTests for Digital Image Processing."""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
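# Added note: run with pytest from the repository root (path illustrative):
#   python -m pytest digital_image_processing/test_digital_image_processing.py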
| 359 |
"""TensorFlow mT5 model classes, thin wrappers over their T5 counterparts."""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
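# Hedged usage sketch (added; the checkpoint name is illustrative):
#   model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")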
| 164 | 0 |
"""simple docstring"""
def _snake_case ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(lowercase__ , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
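# Worked check (added): the triplet is (200, 375, 425): 200 + 375 + 425 = 1000,
# 200**2 + 375**2 = 425**2, and the product is 31875000.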
if __name__ == "__main__":
print(F"{solution() = }") | 96 |
"""OpenAI GPT configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"

    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
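# Added usage sketch: attribute_map lets common config names alias the
# GPT-specific ones, e.g.
#   config = OpenAIGPTConfig(n_layer=6)
#   config.num_hidden_layers  # -> 6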
| 251 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'width_multiplier' ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=64 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase="swish" , lowerCAmelCase=3 , lowerCAmelCase=32 , lowerCAmelCase=0.1 , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=10 , lowerCAmelCase=None , lowerCAmelCase=0.25 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = make_divisible(5_12 * width_multiplier , divisor=8 )
snake_case = hidden_act
snake_case = conv_kernel_size
snake_case = output_stride
snake_case = classifier_dropout_prob
snake_case = use_labels
snake_case = is_training
snake_case = num_labels
snake_case = initializer_range
snake_case = scope
snake_case = width_multiplier
snake_case = ffn_dropout
snake_case = attn_dropout
def snake_case ( self ):
"""simple docstring"""
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.num_labels )
snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case ( self ):
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = MobileViTVaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = MobileViTVaForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = MobileViTVaForSemanticSegmentation(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case ,snake_case = config_and_inputs
snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowerCAmelCase : Tuple = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : int = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Union[str, Any] = False
def snake_case ( self ):
"""simple docstring"""
snake_case = MobileViTVaModelTester(self )
snake_case = MobileViTVaConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
snake_case = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
snake_case = outputs.hidden_states
snake_case = 5
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case = 2
for i in range(len(lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = MobileViTVaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowerCAmelCase__ ( ) -> Tuple:
"""simple docstring"""
snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
lowerCAmelCase )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
snake_case = model(**lowerCAmelCase )
# verify the logits
snake_case = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
snake_case = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
snake_case = model.to(lowerCAmelCase )
snake_case = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
snake_case = prepare_img()
snake_case = image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
snake_case = model(**lowerCAmelCase )
snake_case = outputs.logits
# verify the logits
snake_case = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCAmelCase )
snake_case = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
snake_case = model.to(lowerCAmelCase )
snake_case = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
snake_case = prepare_img()
snake_case = image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
snake_case = model(**lowerCAmelCase )
snake_case = outputs.logits.detach().cpu()
snake_case = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase , target_sizes=[(50, 60)] )
snake_case = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase )
snake_case = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase )
snake_case = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase )
| 149 | """simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Map one GitHub Actions job payload to its start/end timestamps and duration in minutes."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
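# Illustrative check (not from the original script; the job payload below is
# made up, but GitHub returns ISO-8601 timestamps in exactly this shape):
if __name__ == "__main__":
    _demo = extract_time_from_single_job(
        {"started_at": "2023-05-01T10:00:12Z", "completed_at": "2023-05-01T10:07:48Z"}
    )
    assert _demo["duration"] == 8  # 456 seconds rounds to 8 minutes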
def get_job_time(workflow_run_id, token=None):
    """Fetch every job of a GitHub Actions workflow run and map job name to its timing info."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 149 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : int = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'blenderbot-small'
SCREAMING_SNAKE_CASE__ = ['past_key_values']
SCREAMING_SNAKE_CASE__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=512 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=512 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ):
a :Dict = vocab_size
a :Optional[Any] = max_position_embeddings
a :str = d_model
a :Any = encoder_ffn_dim
a :Optional[int] = encoder_layers
a :List[str] = encoder_attention_heads
a :List[str] = decoder_ffn_dim
a :Optional[int] = decoder_layers
a :str = decoder_attention_heads
a :List[str] = dropout
a :Optional[int] = attention_dropout
a :Dict = activation_dropout
a :List[str] = activation_function
a :List[Any] = init_std
a :Optional[int] = encoder_layerdrop
a :Tuple = decoder_layerdrop
a :List[str] = use_cache
a :int = encoder_layers
a :Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
class _snake_case ( _snake_case ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
a :Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
a :Union[str, Any] = {0: '''batch'''}
a :Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
a :Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
a :str = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
a :Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
a , a :str = self.num_layers
for i in range(_lowerCamelCase ):
a :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
a :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
a :Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
a :List[Any] = super().outputs
else:
a :Union[str, Any] = super(_lowerCamelCase , self ).outputs
if self.use_past:
a , a :int = self.num_layers
for i in range(_lowerCamelCase ):
a :int = {0: '''batch''', 2: '''past_sequence + sequence'''}
a :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
a :Dict = seq_length if not self.use_past else 1
a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
a :List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
a :List[str] = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a , a :Optional[Any] = common_inputs['''input_ids'''].shape
a :Tuple = common_inputs['''decoder_input_ids'''].shape[1]
a , a :List[Any] = self.num_attention_heads
a :List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a :int = decoder_seq_length + 3
a :Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a :Union[str, Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
a :List[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a , a :Optional[int] = self.num_layers
a :str = min(_lowerCamelCase , _lowerCamelCase )
a :str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
a :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
a :int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a , a :Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a :Optional[int] = seqlen + 2
a , a :Union[str, Any] = self.num_layers
a , a :Optional[Any] = self.num_attention_heads
a :str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a :Tuple = common_inputs['''attention_mask'''].dtype
a :Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
a :Any = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a :Optional[Any] = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a :Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
a :Tuple = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
a :List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
a :Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
a :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
elif self.task == "causal-lm":
a :Dict = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
a :Optional[int] = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
a :Any = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
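# Note (added for illustration, not from the source): with `use_past` enabled
# the exported ONNX graph decodes incrementally, so the dummy generators above
# allocate one pair of zero key/value tensors of shape
# (batch, num_heads, seq_len, head_dim) per layer; encoder and decoder layer
# counts may differ, hence the min/max "remaining side" handling above.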
| 94 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    """Check a single pinned dependency at runtime, raising if the installed version mismatches."""
    require_version(deps[pkg], hint)
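# Illustrative call (assumption: mirrors how transformers invokes this helper
# at import time for optional extras):
#   dep_version_check("tokenizers")  # raises ImportError via require_version
#   when the installed tokenizers violates the spec pinned in `deps`.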
| 94 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_distilbert_fast'''] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_distilbert'''] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_distilbert'''] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_distilbert'''] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
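# Note (added for illustration): thanks to `_LazyModule`, importing this
# package only registers the names in `_import_structure`; the torch, tf and
# flax submodules are loaded on first attribute access, keeping imports cheap.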
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mvp'''] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__a = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = '''bridgetower_vision_model'''
def __init__( self : int , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Dict=1_6 , lowerCAmelCase__ : int=2_8_8 , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : int=1e-05 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : int=True , lowerCAmelCase__ : int=False , **lowerCAmelCase__ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Tuple = patch_size
_UpperCAmelCase : str = image_size
_UpperCAmelCase : List[Any] = initializer_factor
_UpperCAmelCase : Any = layer_norm_eps
_UpperCAmelCase : Optional[Any] = stop_gradient
_UpperCAmelCase : List[str] = share_layernorm
_UpperCAmelCase : List[str] = remove_last_layer
@classmethod
def _lowerCAmelCase ( cls : Optional[Any] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Any ) -> "PretrainedConfig":
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Any = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get("model_type" ) == "bridgetower":
            _UpperCAmelCase : Optional[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''bridgetower_text_model'''
def __init__( self : int , lowerCAmelCase__ : Optional[int]=5_0_2_6_5 , lowerCAmelCase__ : Tuple=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : Optional[Any]=3_0_7_2 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : int=5_1_4 , lowerCAmelCase__ : List[Any]=1 , lowerCAmelCase__ : Any=1e-05 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : List[Any]=0 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : List[Any]="absolute" , lowerCAmelCase__ : Optional[Any]=True , **lowerCAmelCase__ : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : int = initializer_factor
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Optional[Any] = position_embedding_type
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Optional[Any] = pad_token_id
_UpperCAmelCase : Union[str, Any] = bos_token_id
_UpperCAmelCase : int = eos_token_id
@classmethod
def _lowerCAmelCase ( cls : Tuple , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Dict ) -> "PretrainedConfig":
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : str = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get("model_type" ) == "bridgetower":
_UpperCAmelCase : int = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = '''bridgetower'''
def __init__( self : List[str] , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : List[str]=1e-05 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="add" , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Optional[int]=6 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Optional[Any] , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = kwargs.pop("text_config_dict" , lowerCAmelCase__ )
_UpperCAmelCase : int = kwargs.pop("vision_config_dict" , lowerCAmelCase__ )
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = share_cross_modal_transformer_layers
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Tuple = initializer_factor
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Tuple = share_link_tower_layers
_UpperCAmelCase : List[str] = link_tower_type
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : Optional[int] = tie_word_embeddings
_UpperCAmelCase : int = init_layernorm_from_vision_encoder
if text_config is None:
_UpperCAmelCase : str = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
_UpperCAmelCase : Union[str, Any] = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
_UpperCAmelCase : str = BridgeTowerTextConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = BridgeTowerVisionConfig(**lowerCAmelCase__ )
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] , lowerCAmelCase__ : BridgeTowerTextConfig , lowerCAmelCase__ : BridgeTowerVisionConfig , **lowerCAmelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase__ )
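    # Illustrative composition (assumption: the classmethod above corresponds to
    # BridgeTower's `from_text_vision_configs`; the name is inferred, not shown):
    #   BridgeTowerConfig.from_text_vision_configs(BridgeTowerTextConfig(), BridgeTowerVisionConfig())
    # bundles both sub-configs into a single multimodal config object.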
def _lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Union[str, Any] = self.text_config.to_dict()
_UpperCAmelCase : Union[str, Any] = self.vision_config.to_dict()
_UpperCAmelCase : List[str] = self.__class__.model_type
return output | 145 | '''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__a = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
__a = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__a = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
__a = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
__a = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
__a = ''
__a = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def __UpperCAmelCase ( a_: Any, a_: Any ):
assert ReadMe.from_string(a_, a_ ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def __UpperCAmelCase ( a_: Optional[int], a_: int ):
with pytest.raises(a_, match=re.escape(expected_error.format(path="root" ) ) ):
_UpperCAmelCase : Union[str, Any] = ReadMe.from_string(a_, a_ )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def __UpperCAmelCase ( a_: str, a_: Optional[Any] ):
with pytest.raises(a_, match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(a_, a_ )
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def __UpperCAmelCase ( a_: str ):
ReadMe.from_string(a_, a_, suppress_parsing_errors=a_ )
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def __UpperCAmelCase ( a_: str, a_: Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Optional[Any] = Path(a_ ) / "README.md"
with open(a_, "w+" ) as readme_file:
readme_file.write(a_ )
_UpperCAmelCase : Tuple = ReadMe.from_readme(a_, a_ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def __UpperCAmelCase ( a_: List[Any], a_: str ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Optional[Any] = Path(a_ ) / "README.md"
with open(a_, "w+" ) as readme_file:
readme_file.write(a_ )
_UpperCAmelCase : Any = expected_error.format(path=a_ )
with pytest.raises(a_, match=re.escape(a_ ) ):
_UpperCAmelCase : str = ReadMe.from_readme(a_, a_ )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def __UpperCAmelCase ( a_: Tuple, a_: Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : List[str] = Path(a_ ) / "README.md"
with open(a_, "w+" ) as readme_file:
readme_file.write(a_ )
_UpperCAmelCase : Any = expected_error.format(path=a_ )
with pytest.raises(a_, match=re.escape(a_ ) ):
ReadMe.from_readme(a_, a_ )
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def __UpperCAmelCase ( a_: Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : List[Any] = Path(a_ ) / "README.md"
with open(a_, "w+" ) as readme_file:
readme_file.write(a_ )
ReadMe.from_readme(a_, a_, suppress_parsing_errors=a_ ) | 145 | 1 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
        '--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
        '--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to whether it has at least one gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
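# Quick illustration (added; the sample string is made up): articles are
# dropped, punctuation stripped, case folded and whitespace squeezed, e.g.
# normalize_answer("The  Eiffel Tower!") -> "eiffel tower".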
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
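# Worked example (added; strings are made up): for gold "the cat sat" and
# prediction "cat sat down", the normalized tokens are ["cat", "sat"] vs
# ["cat", "sat", "down"], so precision = 2/3, recall = 2/2 and
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.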
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
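# In words: once the model's no-answer probability clears the threshold, the
# prediction counts as "no answer", scoring 1 only on unanswerable questions;
# below the threshold the raw EM/F1 score passes through unchanged.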
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score', )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_fa, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f'Histogram of no-answer probability: {name}')
    plt.savefig(os.path.join(image_dir, f'na_prob_hist_{name}.png'))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
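# The sweep above visits questions in increasing no-answer probability;
# `diff` is +score for answerable questions and -1 (spurious answer) or 0 for
# unanswerable ones, so `best_thresh` is the cutoff that maximizes the overall
# score if every question above it were answered with "".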
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
A_ : List[str] =parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 80 |
"""simple docstring"""
from math import factorial
def SCREAMING_SNAKE_CASE_ ( snake_case : int = 20 )-> int:
    _lowerCamelCase = 2 * snake_case  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    _lowerCamelCase_half = _lowerCamelCase // 2
    return int(factorial(_lowerCamelCase ) / (factorial(_lowerCamelCase_half ) * factorial(_lowerCamelCase - _lowerCamelCase_half )) )
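# Equivalent hedged sketch using the standard library (math.comb needs Python >= 3.8);
# for the default n = 20 both return C(40, 20) = 137846528820.
def _solution_with_comb(snake_case: int = 20) -> int:
    from math import comb
    return comb(2 * snake_case, snake_case)  # C(2n, n): lattice paths through an n x n grid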
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
A_ : Optional[Any] =int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 80 | 1 |
'''simple docstring'''
import math
import sys
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Any = ""
try:
with open(_UpperCAmelCase , "rb" ) as binary_file:
_UpperCAmelCase : Union[str, Any] = binary_file.read()
for dat in data:
_UpperCAmelCase : int = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = {"0": "0", "1": "1"}
_UpperCAmelCase , _UpperCAmelCase : str = "", ""
_UpperCAmelCase : Any = len(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_UpperCAmelCase : List[Any] = lexicon[curr_string]
result += last_match_id
_UpperCAmelCase : Tuple = last_match_id + "0"
        if math.log2(_UpperCAmelCase ).is_integer():
_UpperCAmelCase : List[Any] = {}
for curr_key in list(_UpperCAmelCase ):
_UpperCAmelCase : Any = lexicon.pop(_UpperCAmelCase )
_UpperCAmelCase : int = new_lex
_UpperCAmelCase : Dict = last_match_id + "1"
index += 1
_UpperCAmelCase : Tuple = ""
return result
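# Hedged note on the loop above: whenever the lexicon size reaches a power of two the
# code width grows by one bit, so every existing key is rebuilt with a leading "0" to
# keep the variable-width codes uniquely decodable.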
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
_UpperCAmelCase : Tuple = 8
try:
with open(_UpperCAmelCase , "wb" ) as opened_file:
_UpperCAmelCase : str = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCAmelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Tuple = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_UpperCAmelCase : List[Any] = data_bits[counter:]
_UpperCAmelCase : Union[str, Any] = data_bits[counter + 1 :]
return data_bits
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = read_file_binary(_UpperCAmelCase )
_UpperCAmelCase : List[Any] = remove_prefix(_UpperCAmelCase )
_UpperCAmelCase : Tuple = decompress_data(_UpperCAmelCase )
write_file_binary(_UpperCAmelCase , _UpperCAmelCase )
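# Hedged usage sketch (file names are illustrative): the compressed input is expected to
# begin with a run of "0"s terminated by a single "1" (the placeholder stripped by the
# prefix-removal helper above), e.g.
#   python lzw_decompress.py archive.lzw restored.bin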
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class lowerCamelCase_ (ModelMixin ):
'''simple docstring'''
def __init__( self : Optional[int] , A : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
_UpperCAmelCase : Optional[int] = nn.ModuleList(A )
def _A ( self : Dict , A : torch.FloatTensor , A : Union[torch.Tensor, float, int] , A : torch.Tensor , A : List[torch.tensor] , A : List[float] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[Dict[str, Any]] = None , A : bool = False , A : bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(A , A , self.nets ) ):
_UpperCAmelCase , _UpperCAmelCase : str = controlnet(
A , A , A , A , A , A , A , A , A , A , A , )
# merge samples
if i == 0:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = down_samples, mid_sample
else:
_UpperCAmelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A , A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _A ( self : List[str] , A : Union[str, os.PathLike] , A : bool = True , A : Callable = None , A : bool = False , A : Optional[str] = None , ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A , is_main_process=A , save_function=A , safe_serialization=A , variant=A , )
idx += 1
_UpperCAmelCase : Tuple = model_path_to_save + F"""_{idx}"""
@classmethod
def _A ( cls : int , A : Optional[Union[str, os.PathLike]] , **A : Tuple ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : int = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_UpperCAmelCase : int = pretrained_model_path
while os.path.isdir(A ):
_UpperCAmelCase : List[str] = ControlNetModel.from_pretrained(A , **A )
controlnets.append(A )
idx += 1
_UpperCAmelCase : Dict = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(A )} controlnets loaded from {pretrained_model_path}.""" )
if len(A ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(A )
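# Hedged usage sketch (checkpoint directories are illustrative; `lowerCamelCase_` is the
# obfuscated multi-controlnet wrapper defined above):
#   nets = lowerCamelCase_([ControlNetModel.from_pretrained(p) for p in ("cn", "cn_1")])
#   down_res, mid_res = nets(sample, t, encoder_hidden_states, cond_images, scales)
# Per-controlnet residuals are summed element-wise before the UNet consumes them.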
| 31 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : int = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A (PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : List[str] = '''data2vec-audio'''
def __init__( self : Any , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Optional[Any]=30_72 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : List[str]=0.0_2 , __lowerCAmelCase : str=1e-5 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowerCAmelCase : Tuple=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Union[str, Any]=16 , __lowerCAmelCase : str=19 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=0.0_5 , __lowerCAmelCase : int=10 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : List[str]=10 , __lowerCAmelCase : Any=0 , __lowerCAmelCase : str="sum" , __lowerCAmelCase : Dict=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Tuple=2_56 , __lowerCAmelCase : Dict=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowerCAmelCase : Dict=(5, 3, 3, 1, 1) , __lowerCAmelCase : List[Any]=(1, 2, 3, 1, 1) , __lowerCAmelCase : List[Any]=5_12 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
A__ = hidden_size
A__ = feat_extract_activation
A__ = list(__lowerCAmelCase )
A__ = list(__lowerCAmelCase )
A__ = list(__lowerCAmelCase )
A__ = conv_bias
A__ = num_conv_pos_embeddings
A__ = num_conv_pos_embedding_groups
A__ = conv_pos_kernel_size
A__ = len(self.conv_dim )
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = num_attention_heads
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = feat_proj_dropout
A__ = final_dropout
A__ = layerdrop
A__ = layer_norm_eps
A__ = initializer_range
A__ = vocab_size
A__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ = mask_time_prob
A__ = mask_time_length
A__ = mask_time_min_masks
A__ = mask_feature_prob
A__ = mask_feature_length
A__ = mask_feature_min_masks
# ctc loss
A__ = ctc_loss_reduction
A__ = ctc_zero_infinity
# adapter
A__ = add_adapter
A__ = adapter_kernel_size
A__ = adapter_stride
A__ = num_adapter_layers
A__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A__ = list(__lowerCAmelCase )
A__ = list(__lowerCAmelCase )
A__ = list(__lowerCAmelCase )
A__ = xvector_output_dim
@property
def a_ ( self : Dict ) -> Any:
"""simple docstring"""
return math.prod(self.conv_stride )
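# Hedged note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above
# returns 5 * 2**6 = 320, i.e. one encoder frame per 320 raw samples (20 ms at 16 kHz).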
| 362 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A : Dict = random.Random()
def __lowerCamelCase ( __a :Dict , __a :str=1.0 , __a :List[Any]=None , __a :List[str]=None ) -> Optional[int]:
"""simple docstring"""
if rng is None:
A__ = global_rng
A__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class A (unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : str=7 , __lowerCAmelCase : List[Any]=4_00 , __lowerCAmelCase : Optional[Any]=20_00 , __lowerCAmelCase : Dict=10 , __lowerCAmelCase : Union[str, Any]=1_60 , __lowerCAmelCase : List[Any]=8 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : int=40_00 , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[str]=True , ) -> Optional[int]:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = min_seq_length
A__ = max_seq_length
A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ = padding_value
A__ = sampling_rate
A__ = return_attention_mask
A__ = do_normalize
A__ = feature_size
A__ = chunk_length
A__ = hop_length
def a_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a_ ( self : List[str] , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=False ) -> str:
"""simple docstring"""
def _flatten(__lowerCAmelCase : Optional[int] ):
return list(itertools.chain(*__lowerCAmelCase ) )
if equal_length:
A__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A (SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = WhisperFeatureExtractor if is_speech_available() else None
def a_ ( self : Any ) -> str:
"""simple docstring"""
A__ = WhisperFeatureExtractionTester(self )
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = feat_extract_first.save_pretrained(__lowerCAmelCase )[0]
check_json_file_has_correct_format(__lowerCAmelCase )
A__ = self.feature_extraction_class.from_pretrained(__lowerCAmelCase )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = feat_extract_first.mel_filters
A__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : Dict ) -> str:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = os.path.join(__lowerCAmelCase , """feat_extract.json""" )
feat_extract_first.to_json_file(__lowerCAmelCase )
A__ = self.feature_extraction_class.from_json_file(__lowerCAmelCase )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = feat_extract_first.mel_filters
A__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
A__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
A__ = feature_extractor(__lowerCAmelCase , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
A__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) )
# Test batched
A__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
A__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
A__ = np.asarray(__lowerCAmelCase )
A__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
A__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) )
# Test truncation required
A__ = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
A__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
A__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
A__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs_truncated]
A__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
A__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) )
def a_ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
import torch
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = np.random.rand(1_00 , 32 ).astype(np.floataa )
A__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a_ ( self : List[Any] , __lowerCAmelCase : Any ) -> Dict:
"""simple docstring"""
A__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ = ds.sort("""id""" ).select(range(__lowerCAmelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a_ ( self : Dict ) -> Any:
"""simple docstring"""
        # fmt: off
        A__ = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
A__ = self._load_datasamples(1 )
A__ = WhisperFeatureExtractor()
A__ = feature_extractor(__lowerCAmelCase , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __lowerCAmelCase , atol=1e-4 ) )
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = self._load_datasamples(1 )[0]
A__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
A__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__lowerCAmelCase )[0]
self.assertTrue(np.all(np.mean(__lowerCAmelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCAmelCase ) - 1 ) < 1e-3 ) )
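# Hedged sketch of the statistic the final test checks (a standalone helper, not part of
# the original suite; relies on the numpy import at the top of this file):
def _zero_mean_unit_var(x):
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)  # yields mean ~0 and variance ~1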
| 276 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
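# Hedged note: the _LazyModule registered above defers the heavy torch/tf/flax imports
# until an attribute is first accessed, so importing the package stays cheap when the
# optional backends are not installed.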
| 294 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( SchedulerCommonTest ):
UpperCamelCase : int = (IPNDMScheduler,)
UpperCamelCase : int = (('''num_inference_steps''', 50),)
def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int:
_a : Optional[int] = {"""num_train_timesteps""": 1000}
config.update(**UpperCAmelCase__ )
return config
def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
_a : Optional[int] = dict(self.forward_default_kwargs )
_a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : Union[str, Any] = 0.1 * sample
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : Tuple ) -> List[str]:
pass
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
_a : Optional[Any] = dict(self.forward_default_kwargs )
_a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : List[Any] = 0.1 * sample
_a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
_a : int = 10
_a : List[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_a : str = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def _lowercase ( self : int ) -> str:
_a : Dict = dict(self.forward_default_kwargs )
_a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**UpperCAmelCase__ )
_a : Tuple = self.dummy_sample
_a : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ):
_a : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_a : Optional[Any] = dummy_past_residuals[:]
_a : Optional[Any] = scheduler.timesteps[5]
_a : str = scheduler.timesteps[6]
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase ( self : List[str] ) -> List[str]:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> List[str]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[Any]:
_a : str = self.full_loop()
_a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
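# Hedged note: IPNDM is a fourth-order multistep scheduler, which is why these tests seed
# four dummy past residuals before stepping and require saved/restored schedulers to
# reproduce bit-identical outputs.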
| 294 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( PretrainedConfig ):
__lowercase : Optional[Any] = 'mask2former'
__lowercase : Any = ['swin']
__lowercase : Union[str, Any] = {'hidden_size': 'hidden_dim'}
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = 256 , lowerCAmelCase__ = 256 , lowerCAmelCase__ = 256 , lowerCAmelCase__ = 1_024 , lowerCAmelCase__ = "relu" , lowerCAmelCase__ = 6 , lowerCAmelCase__ = 10 , lowerCAmelCase__ = 8 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 2_048 , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = 4 , lowerCAmelCase__ = 255 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 2.0 , lowerCAmelCase__ = 5.0 , lowerCAmelCase__ = 5.0 , lowerCAmelCase__ = 12_544 , lowerCAmelCase__ = 3.0 , lowerCAmelCase__ = 0.7_5 , lowerCAmelCase__ = 0.0_2 , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = True , lowerCAmelCase__ = [4, 8, 16, 32] , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[Any]:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
lowercase__: Dict = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCAmelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: Any = backbone_config.pop('model_type' )
lowercase__: Optional[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__: Tuple = config_class.from_dict(lowerCAmelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
F'Supported model types: {",".join(self.backbones_supported )}' )
lowercase__: Optional[Any] = backbone_config
lowercase__: str = feature_size
lowercase__: Optional[int] = mask_feature_size
lowercase__: Union[str, Any] = hidden_dim
lowercase__: List[Any] = encoder_feedforward_dim
lowercase__: Optional[Any] = activation_function
lowercase__: Tuple = encoder_layers
lowercase__: Optional[int] = decoder_layers
lowercase__: Optional[Any] = num_attention_heads
lowercase__: str = dropout
lowercase__: List[str] = dim_feedforward
lowercase__: Any = pre_norm
lowercase__: Optional[Any] = enforce_input_projection
lowercase__: int = common_stride
lowercase__: List[Any] = ignore_value
lowercase__: Union[str, Any] = num_queries
lowercase__: Union[str, Any] = no_object_weight
lowercase__: List[Any] = class_weight
lowercase__: str = mask_weight
lowercase__: Optional[int] = dice_weight
lowercase__: Any = train_num_points
lowercase__: Union[str, Any] = oversample_ratio
lowercase__: int = importance_sample_ratio
lowercase__: int = init_std
lowercase__: Optional[Any] = init_xavier_std
lowercase__: Tuple = use_auxiliary_loss
lowercase__: Any = feature_strides
lowercase__: str = output_auxiliary_logits
lowercase__: Union[str, Any] = decoder_layers
super().__init__(**lowerCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return cls(
backbone_config=lowerCAmelCase__ , **lowerCAmelCase__ , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict[str, any]:
'''simple docstring'''
lowercase__: int = copy.deepcopy(self.__dict__ )
lowercase__: Optional[int] = self.backbone_config.to_dict()
lowercase__: Union[str, Any] = self.__class__.model_type
return output
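# Hedged note: to_dict above deep-copies the config and flattens backbone_config, so the
# nested Swin settings serialize to plain JSON alongside the Mask2Former fields.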
| 288 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( SequenceFeatureExtractor ):
__lowercase : Tuple = ['input_values', 'attention_mask']
def __init__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 16_000 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = False , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 16 , lowerCAmelCase__ = 64 , lowerCAmelCase__ = "hann_window" , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 7_600 , lowerCAmelCase__ = 1E-10 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: Dict = do_normalize
lowercase__: Optional[Any] = return_attention_mask
lowercase__: str = num_mel_bins
lowercase__: Dict = hop_length
lowercase__: Dict = win_length
lowercase__: Optional[int] = win_function
lowercase__: Any = frame_signal_scale
lowercase__: Tuple = fmin
lowercase__: Tuple = fmax
lowercase__: Dict = mel_floor
lowercase__: int = reduction_factor
lowercase__: List[Any] = win_length * sampling_rate // 1_000
lowercase__: Optional[Any] = hop_length * sampling_rate // 1_000
lowercase__: Optional[int] = optimal_fft_length(self.sample_size )
lowercase__: Optional[Any] = (self.n_fft // 2) + 1
lowercase__: str = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase__ )
lowercase__: Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowercase__: List[str] = np.array(lowerCAmelCase__ , np.intaa )
lowercase__: Tuple = []
for vector, length in zip(lowerCAmelCase__ , attention_mask.sum(-1 ) ):
lowercase__: int = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase__: Tuple = padding_value
normed_input_values.append(lowerCAmelCase__ )
else:
lowercase__: Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
lowercase__: List[str] = spectrogram(
lowerCAmelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
lowercase__: Dict = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
lowercase__: str = None
if audio_target is not None:
lowercase__: List[str] = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
if inputs is None:
return inputs_target
else:
lowercase__: int = inputs_target['input_values']
lowercase__: List[str] = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
lowercase__: Optional[int] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
lowercase__: int = isinstance(lowerCAmelCase__ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase__: Tuple = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__: Dict = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
lowercase__: Optional[Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowercase__: Optional[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__: Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
lowercase__: str = self.feature_size
# convert into correct format for padding
if is_target:
lowercase__: int = [self._extract_mel_features(lowerCAmelCase__ ) for waveform in speech]
lowercase__: Dict = BatchFeature({'input_values': features} )
lowercase__: Union[str, Any] = self.num_mel_bins
else:
lowercase__: Union[str, Any] = BatchFeature({'input_values': speech} )
lowercase__: Dict = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: List[str] = feature_size_hack
# convert input values to correct format
lowercase__: Union[str, Any] = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
lowercase__: List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCAmelCase__ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowercase__: Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCAmelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowercase__: Tuple = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowercase__: Tuple = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowercase__: str = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowercase__: Tuple = (
attention_mask
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase__: str = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=lowerCAmelCase__ , padding_value=self.padding_value )
if return_tensors is not None:
lowercase__: Union[str, Any] = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowercase__: int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowercase__: str = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
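# Hedged note: window, mel_filters and the fft geometry are dropped from to_dict above
# because they are derived values, recomputed from win_length / hop_length /
# sampling_rate when the feature extractor is reloaded.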
| 288 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Tuple = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _snake_case ( PretrainedConfig ):
SCREAMING_SNAKE_CASE__ = 'beit'
def __init__( self , _lowerCamelCase=8192 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=224 , _lowerCamelCase=16 , _lowerCamelCase=3 , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=[3, 5, 7, 11] , _lowerCamelCase=[1, 2, 3, 6] , _lowerCamelCase=True , _lowerCamelCase=0.4 , _lowerCamelCase=256 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=255 , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
a :Any = vocab_size
a :Optional[int] = hidden_size
a :Dict = num_hidden_layers
a :str = num_attention_heads
a :Dict = intermediate_size
a :Optional[Any] = hidden_act
a :Tuple = hidden_dropout_prob
a :List[Any] = attention_probs_dropout_prob
a :List[str] = initializer_range
a :int = layer_norm_eps
a :List[Any] = image_size
a :Dict = patch_size
a :Optional[int] = num_channels
a :Union[str, Any] = use_mask_token
a :Any = use_absolute_position_embeddings
a :List[str] = use_relative_position_bias
a :List[Any] = use_shared_relative_position_bias
a :Tuple = layer_scale_init_value
a :Any = drop_path_rate
a :Any = use_mean_pooling
# decode head attributes (semantic segmentation)
a :List[Any] = out_indices
a :List[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
a :Optional[int] = use_auxiliary_head
a :Union[str, Any] = auxiliary_loss_weight
a :Union[str, Any] = auxiliary_channels
a :List[str] = auxiliary_num_convs
a :List[Any] = auxiliary_concat_input
a :Dict = semantic_loss_ignore_index
class _snake_case ( OnnxConfig ):
SCREAMING_SNAKE_CASE__ = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1e-4
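# Hedged note: the ONNX export config above declares a single dynamic pixel_values input,
# requires torch >= 1.11 for export, and uses 1e-4 as the absolute tolerance when
# validating exported outputs against the eager model.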
| 94 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=True ):
"""simple docstring"""
model.train()
a :str = model(UpperCAmelCase_ )
a :List[str] = F.mse_loss(UpperCAmelCase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int=False ):
"""simple docstring"""
set_seed(42 )
a :List[Any] = RegressionModel()
a :Any = deepcopy(UpperCAmelCase_ )
a :Tuple = RegressionDataset(length=80 )
a :Tuple = DataLoader(UpperCAmelCase_ , batch_size=16 )
model.to(accelerator.device )
if sched:
a :str = AdamW(params=model.parameters() , lr=1E-3 )
a :str = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda epoch : epoch**0.65 )
        a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
a , a , a , a :List[Any] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
a , a :str = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
a , a , a :str = get_training_setup(UpperCAmelCase_ )
# Use a single batch
a , a :Dict = next(iter(UpperCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a :int = accelerator.gather((ddp_input, ddp_target) )
a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# Sync grads
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a :Union[str, Any] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )]
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
a , a , a :List[str] = get_training_setup(UpperCAmelCase_ )
# Use a single batch
a , a :List[str] = next(iter(UpperCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a :List[Any] = accelerator.gather((ddp_input, ddp_target) )
a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# Sync grads
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a :Any = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )]
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=False ):
"""simple docstring"""
a :Optional[int] = Accelerator(
split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a :List[str] = get_training_setup(UpperCAmelCase_ )
for iteration, batch in enumerate(UpperCAmelCase_ ):
a , a :List[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
a , a :List[str] = accelerator.gather((ddp_input, ddp_target) )
a , a :List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCAmelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a :List[str] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )]
GradientState._reset_state()
def __lowerCamelCase ( UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[int]=False ):
"""simple docstring"""
a :Optional[Any] = Accelerator(
split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a , a , a , a , a :Optional[Any] = get_training_setup(UpperCAmelCase_ , UpperCAmelCase_ )
for iteration, batch in enumerate(UpperCAmelCase_ ):
a , a :int = batch.values()
# Gather the distributed inputs and targs for the base model
a , a :List[str] = accelerator.gather((ddp_input, ddp_target) )
a , a :str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCAmelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
a :Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCAmelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('''**Test `accumulate` gradient accumulation with dataloader break**''')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('''**Test NOOP `no_sync` context manager**''')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('''**Test Distributed `no_sync` context manager**''')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation, ''',
                        f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('''<''', '''2.0''') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''',
                '''`split_batches=False`, `dispatch_batches=False`**''',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''',
                        f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
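# A minimal, self-contained sketch of the `accumulate` pattern these tests
# exercise, assuming a toy regression model; the names and sizes below are
# illustrative, not part of the test script above.
def _sketch_accumulate_usage():
    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from accelerate import Accelerator

    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = torch.nn.Linear(1, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    dataset = TensorDataset(torch.randn(16, 1), torch.randn(16, 1))
    dataloader = DataLoader(dataset, batch_size=4)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        # Gradients only sync (and the step only "counts") every second batch.
        with accelerator.accumulate(model):
            loss = torch.nn.functional.mse_loss(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()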
| 94 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
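# A hedged, standalone illustration of the seqeval entity-level metrics used
# in compute_metrics above, run on toy BIO-tagged sequences instead of real
# model output.
def _sketch_seqeval_metrics():
    from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score

    y_true = [["B-PER", "I-PER", "O"], ["B-LOC", "O", "O"]]
    y_pred = [["B-PER", "I-PER", "O"], ["O", "O", "O"]]
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        "precision": precision_score(y_true, y_pred),
        "recall": recall_score(y_true, y_pred),
        "f1": f1_score(y_true, y_pred),
    }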
| 81 | import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
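# A hedged smoke run of the scheduler under test, outside the common test
# harness; the random "model output" and the 32x32 sample shape are
# illustrative stand-ins, not part of the test suite above.
def _sketch_unclip_scheduler_loop():
    scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
    scheduler.set_timesteps(10)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 32, 32, generator=generator)
    for t in scheduler.timesteps:
        model_output = torch.randn(sample.shape, generator=generator)  # stand-in for a denoiser
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample.shape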
| 81 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split a sklearn-style dataset mapping into features and target.
    return (data["data"], data["target"])
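# A hedged micro-check: data_handling just unpacks a sklearn-style mapping,
# so a plain dict with placeholder values is enough to exercise it.
assert data_handling({"data": "mock_data", "target": "mock_target"}) == ("mock_data", "mock_target")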
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
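# A hedged variant of the same fit/predict flow on synthetic data, useful when
# the California housing download is unavailable; the coefficients and sizes
# below are illustrative only.
def _sketch_xgboost_on_synthetic_data() -> np.ndarray:
    rng = np.random.default_rng(0)
    features = rng.normal(size=(200, 3))
    target = features @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=0.25, random_state=1
    )
    return xgboost(x_train, y_train, x_test)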
| 80 | """simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
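# A hedged usage sketch: run the watermarker over a random batch in [-1, 1].
# 512x512 satisfies the 256-pixel minimum checked above; this assumes the
# invisible-watermark (imwatermark) package is installed.
if __name__ == "__main__":
    batch = torch.rand(1, 3, 512, 512) * 2 - 1
    marked = StableDiffusionXLWatermarker().apply_watermark(batch)
    print(marked.shape, marked.min().item(), marked.max().item())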
| 197 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
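# A hedged inline check of the voting rule above: two of the three forecasts
# fall within 0.1 of the actual value, so the vote comes back "safe".
assert data_safety_checker([0.5, 0.45, 2.0], 0.5)
assert not data_safety_checker([2.0, 3.0, 0.1], 0.5)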
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
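    # A hedged sanity check of the normal-equation step used inside
    # linear_regression_prediction: solve a tiny exactly-determined system.
    x_demo = np.array([[1, d, m] for d, m in zip([1, 2, 3], [10, 12, 11])])
    y_demo = np.array([5.0, 6.0, 7.0])
    beta_demo = np.linalg.inv(x_demo.T @ x_demo) @ x_demo.T @ y_demo
    print("normal-equation beta:", beta_demo)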
| 366 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
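# PyTorch ships a comparable building block; a hedged sketch of the same idea
# (a head softmax over frequent tokens plus cluster tails) using the built-in
# module, with tiny illustrative sizes.
def _sketch_builtin_adaptive_softmax():
    asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=32, n_classes=1000, cutoffs=[100, 500], div_value=4.0)
    hidden = torch.randn(16, 32)
    target = torch.randint(0, 1000, (16,))
    return asm(hidden, target).loss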
| 99 | 0 |