| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
from math import factorial


class Dual:
    """A dual number d = real + d1*E1 + d2*E2 + ..., used for automatic differentiation."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # Drop trailing zero dual coefficients.
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list so the two can be added elementwise.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Convolve the two coefficient lists (Cauchy product of the dual parts).
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    result = func(Dual(position, 1))
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
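A quick sanity check against the analytic derivative (not part of the original file, but it follows directly from the implementation above): for f(y) = y**6 the second derivative is f''(y) = 30 * y**4, so f''(9) = 196830.

# Verify the Dual/differentiate implementation against the closed form.
assert differentiate(lambda y: y**6, 9, 2) == 30 * 9**4  # == 196830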
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # Fast tests come from the mixin; nothing pipeline-specific to add here.
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
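A minimal sketch of how a provider tuple and SessionOptions like the ones above are consumed by onnxruntime directly ("model.onnx" is a placeholder path, not from the original file):

import onnxruntime as ort

options = ort.SessionOptions()
options.enable_mem_pattern = False  # same flag the gpu_options property disables
session = ort.InferenceSession(
    "model.onnx",
    sess_options=options,
    providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})],
)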
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a : Optional[int] , a : str=99 , a : str=13 , a : List[Any]=7 , a : Optional[int]=9 , a : Optional[int]=True , a : Union[str, Any]=True , a : Any=False , a : Tuple=32 , a : List[str]=5 , a : Union[str, Any]=4 , a : Union[str, Any]=37 , a : str=8 , a : int=0.1 , a : Optional[int]=0.002 , a : Union[str, Any]=1 , a : Optional[int]=0 , a : Tuple=0 , a : Any=None , a : Optional[Any]=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = encoder_seq_length
SCREAMING_SNAKE_CASE : Tuple = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE : int = self.decoder_seq_length
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Any = use_attention_mask
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = d_ff
SCREAMING_SNAKE_CASE : Tuple = relative_attention_num_buckets
SCREAMING_SNAKE_CASE : List[Any] = dropout_rate
SCREAMING_SNAKE_CASE : int = initializer_factor
SCREAMING_SNAKE_CASE : List[Any] = eos_token_id
SCREAMING_SNAKE_CASE : int = pad_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_start_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Tuple = decoder_layers
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return TaConfig.from_pretrained("google/umt5-base" )
def __UpperCamelCase ( self : Optional[int] , a : List[str] , a : Optional[Any] , a : Optional[int] , a : Tuple=None , a : List[Any]=None , a : int=None , a : Any=None , a : Dict=None , ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE : Tuple = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=a )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=a )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE : int = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE : Any = self.get_config()
SCREAMING_SNAKE_CASE : Union[str, Any] = config.num_attention_heads
SCREAMING_SNAKE_CASE : Dict = self.prepare_inputs_dict(a , a , a )
return config, input_dict
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCamelCase ( self : Dict , a : List[str] , a : Dict , a : int , a : Tuple , a : List[str] , a : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = UMTaModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
input_ids=a , decoder_input_ids=a , attention_mask=a , decoder_attention_mask=a , )
SCREAMING_SNAKE_CASE : Any = model(input_ids=a , decoder_input_ids=a )
SCREAMING_SNAKE_CASE : Optional[Any] = result.last_hidden_state
SCREAMING_SNAKE_CASE : List[str] = result.past_key_values
SCREAMING_SNAKE_CASE : List[str] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(a ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __UpperCamelCase ( self : List[str] , a : Optional[int] , a : Tuple , a : Tuple , a : List[Any] , a : List[Any] , a : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = UMTaModel(config=a ).get_decoder().to(a ).eval()
# first forward pass
SCREAMING_SNAKE_CASE : int = model(a , use_cache=a )
SCREAMING_SNAKE_CASE : Dict = model(a )
SCREAMING_SNAKE_CASE : str = model(a , use_cache=a )
self.parent.assertTrue(len(a ) == len(a ) )
self.parent.assertTrue(len(a ) == len(a ) + 1 )
output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
SCREAMING_SNAKE_CASE : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : str = model(a )["last_hidden_state"]
SCREAMING_SNAKE_CASE : List[str] = model(a , past_key_values=a )["last_hidden_state"]
# select random slice
SCREAMING_SNAKE_CASE : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
def __UpperCamelCase ( self : Tuple , a : Union[str, Any] , a : int , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = UMTaModel(config=a ).to(a ).half().eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(a ).any().item() )
@require_torch
class _UpperCamelCase ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowerCamelCase__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
lowerCamelCase__ =(
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =True
lowerCamelCase__ =True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowerCamelCase__ =[0.8, 0.9]
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : int = UMTaModel(config_and_inputs[0] ).to(a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=a , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs[0]
SCREAMING_SNAKE_CASE : Any = UMTaForConditionalGeneration(a ).eval()
model.to(a )
SCREAMING_SNAKE_CASE : List[Any] = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=a ),
}
for attn_name, (name, mask) in zip(a , head_masking.items() ):
SCREAMING_SNAKE_CASE : Optional[int] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
SCREAMING_SNAKE_CASE : List[Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=a )
SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=a , return_dict_in_generate=a , **a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
SCREAMING_SNAKE_CASE : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=a ).to(a )
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=a , legacy=a )
SCREAMING_SNAKE_CASE : int = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
SCREAMING_SNAKE_CASE : Dict = tokenizer(a , return_tensors="pt" , padding=a ).input_ids
# fmt: off
SCREAMING_SNAKE_CASE : int = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(a , a )
SCREAMING_SNAKE_CASE : Dict = model.generate(input_ids.to(a ) )
SCREAMING_SNAKE_CASE : List[Any] = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
SCREAMING_SNAKE_CASE : Dict = tokenizer.batch_decode(a )
self.assertEqual(a , a )
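The decode-with-past consistency check in the tester above is a generic pattern: results with a KV cache must match a full forward pass. A self-contained sketch against a tiny causal LM (the checkpoint name is an assumption; any small decoder-only model works the same way):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2").eval()
input_ids = torch.randint(0, model.config.vocab_size, (1, 8))
next_token = torch.randint(0, model.config.vocab_size, (1, 1))

with torch.no_grad():
    # First pass builds the cache; second pass feeds only the new token.
    past = model(input_ids, use_cache=True).past_key_values
    from_cache = model(next_token, past_key_values=past).logits[:, -1]
    # Reference: run the whole sequence at once.
    full = model(torch.cat([input_ids, next_token], dim=-1)).logits[:, -1]

assert torch.allclose(from_cache, full, atol=1e-4)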
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
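A hand-run illustration of the dict parity that the parametrized test asserts (uses the same HashMap as above):

hm, ref = HashMap(initial_block_size=4), {}
for key, value in [("key_a", "val_a"), ("key_b", "val_b")]:
    hm[key] = value
    ref[key] = value
assert set(hm.items()) == set(ref.items())
del hm["key_a"], ref["key_a"]
assert len(hm) == len(ref) == 1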
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =StableDiffusionXLImgaImgPipeline
lowerCamelCase__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCamelCase__ =PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ =IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=a , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModel(a )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a )
SCREAMING_SNAKE_CASE : str = CLIPTextModelWithProjection(a )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a )
SCREAMING_SNAKE_CASE : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __UpperCamelCase ( self : Tuple , a : Any , a : Optional[int]=0 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
SCREAMING_SNAKE_CASE : Optional[Any] = image / 2 + 0.5
if str(a ).startswith("mps" ):
SCREAMING_SNAKE_CASE : int = torch.manual_seed(a )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=a ).manual_seed(a )
SCREAMING_SNAKE_CASE : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionXLImgaImgPipeline(**a )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE : Dict = sd_pipe(**a ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionXLImgaImgPipeline(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE : Any = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE : Union[str, Any] = negative_prompt
SCREAMING_SNAKE_CASE : int = 3 * [inputs["prompt"]]
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**a )
SCREAMING_SNAKE_CASE : int = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE : Tuple = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE : List[Any] = 3 * [inputs.pop("prompt" )]
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe(
**a , prompt_embeds=a , negative_prompt_embeds=a , pooled_prompt_embeds=a , negative_pooled_prompt_embeds=a , )
SCREAMING_SNAKE_CASE : int = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any , a : int , a : str="cpu" , a : Dict=torch.floataa , a : Any=0 ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=a ).manual_seed(a )
SCREAMING_SNAKE_CASE : List[str] = np.random.RandomState(a ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(a ).to(device=a , dtype=a )
SCREAMING_SNAKE_CASE : Dict = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_inputs(a )
SCREAMING_SNAKE_CASE : List[str] = pipe(**a ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
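A note on the seeded generators these tests build per run (np.random.RandomState and torch.Generator above): a freshly seeded generator makes the sampled latents bit-for-bit reproducible, which is what allows expected_slice values to be hard-coded. Plain torch API, nothing pipeline-specific:

import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
# Identical seeds on identical devices produce identical samples.
assert torch.equal(torch.randn(2, 3, generator=g1), torch.randn(2, 3, generator=g2))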
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
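For contrast, a generic sketch of the same deferred-import idea using plain PEP 562 module-level __getattr__ instead of transformers' _LazyModule ("heavy_submodule" and "HeavyClass" are made-up names for illustration):

import importlib

_lazy_names = {"HeavyClass": "heavy_submodule"}

def __getattr__(name):
    # Called only when `name` is not found in the module's globals.
    if name in _lazy_names:
        module = importlib.import_module(f".{_lazy_names[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")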
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
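A worked example (not in the original file): with [0.4, 1.2, 0.3], min_value = 0.3 and bucket_count = int(1.2 - 0.3) + 1 = 1, so every element lands in bucket 0 and the final per-bucket sorted() does all the work.

assert bucket_sort([0.4, 1.2, 0.3]) == [0.3, 0.4, 1.2]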
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
lowerCamelCase__ =True
@register_to_config
def __init__( self : Dict , a : int = 3 , a : int = 3 , a : Tuple[str] = ("DownEncoderBlock2D",) , a : Tuple[str] = ("UpDecoderBlock2D",) , a : Tuple[int] = (64,) , a : int = 1 , a : str = "silu" , a : int = 4 , a : int = 32 , a : int = 32 , a : float = 0.1_8215 , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
SCREAMING_SNAKE_CASE : List[Any] = Encoder(
in_channels=a , out_channels=a , down_block_types=a , block_out_channels=a , layers_per_block=a , act_fn=a , norm_num_groups=a , double_z=a , )
# pass init params to Decoder
SCREAMING_SNAKE_CASE : Dict = Decoder(
in_channels=a , out_channels=a , up_block_types=a , block_out_channels=a , layers_per_block=a , norm_num_groups=a , act_fn=a , )
self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
# only relevant if vae tiling is enabled
SCREAMING_SNAKE_CASE : Dict = self.config.sample_size
SCREAMING_SNAKE_CASE : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
SCREAMING_SNAKE_CASE : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.25
def __UpperCamelCase ( self : Optional[Any] , a : Dict , a : Tuple=False ) -> List[str]:
"""simple docstring"""
if isinstance(a , (Encoder, Decoder) ):
SCREAMING_SNAKE_CASE : Any = value
def __UpperCamelCase ( self : Tuple , a : bool = True ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = use_tiling
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
self.enable_tiling(a )
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = True
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCamelCase ( self : Any ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = {}
def fn_recursive_add_processors(a : str , a : torch.nn.Module , a : Dict[str, AttentionProcessor] ):
if hasattr(a , "set_processor" ):
SCREAMING_SNAKE_CASE : Any = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , a , a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a , a , a )
return processors
def __UpperCamelCase ( self : Tuple , a : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = len(self.attn_processors.keys() )
if isinstance(a , a ) and len(a ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(a )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(a : str , a : torch.nn.Module , a : Any ):
if hasattr(a , "set_processor" ):
if not isinstance(a , a ):
module.set_processor(a )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , a , a )
for name, module in self.named_children():
fn_recursive_attn_processor(a , a , a )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCamelCase ( self : Optional[Any] , a : torch.FloatTensor , a : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(a , return_dict=a )
if self.use_slicing and x.shape[0] > 1:
SCREAMING_SNAKE_CASE : str = [self.encoder(a ) for x_slice in x.split(1 )]
SCREAMING_SNAKE_CASE : Any = torch.cat(a )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.encoder(a )
SCREAMING_SNAKE_CASE : Any = self.quant_conv(a )
SCREAMING_SNAKE_CASE : Optional[Any] = DiagonalGaussianDistribution(a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a )
def __UpperCamelCase ( self : List[str] , a : torch.FloatTensor , a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(a , return_dict=a )
SCREAMING_SNAKE_CASE : int = self.post_quant_conv(a )
SCREAMING_SNAKE_CASE : List[Any] = self.decoder(a )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a )
@apply_forward_hook
def __UpperCamelCase ( self : str , a : torch.FloatTensor , a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
SCREAMING_SNAKE_CASE : List[str] = [self._decode(a ).sample for z_slice in z.split(1 )]
SCREAMING_SNAKE_CASE : Any = torch.cat(a )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self._decode(a ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=a )
def __UpperCamelCase ( self : List[Any] , a : Union[str, Any] , a : Optional[int] , a : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = min(a.shape[2] , b.shape[2] , a )
for y in range(a ):
SCREAMING_SNAKE_CASE : List[str] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCamelCase ( self : Any , a : Optional[int] , a : Optional[Any] , a : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = min(a.shape[3] , b.shape[3] , a )
for x in range(a ):
SCREAMING_SNAKE_CASE : Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCamelCase ( self : Dict , a : torch.FloatTensor , a : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE : Optional[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE : Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , x.shape[2] , a ):
SCREAMING_SNAKE_CASE : Dict = []
for j in range(0 , x.shape[3] , a ):
SCREAMING_SNAKE_CASE : Dict = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.encoder(a )
SCREAMING_SNAKE_CASE : Optional[int] = self.quant_conv(a )
row.append(a )
rows.append(a )
SCREAMING_SNAKE_CASE : Tuple = []
for i, row in enumerate(a ):
SCREAMING_SNAKE_CASE : List[str] = []
for j, tile in enumerate(a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE : Optional[Any] = self.blend_v(rows[i - 1][j] , a , a )
if j > 0:
SCREAMING_SNAKE_CASE : str = self.blend_h(row[j - 1] , a , a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a , dim=3 ) )
SCREAMING_SNAKE_CASE : List[str] = torch.cat(a , dim=2 )
SCREAMING_SNAKE_CASE : List[str] = DiagonalGaussianDistribution(a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a )
def __UpperCamelCase ( self : Optional[int] , a : torch.FloatTensor , a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE : List[str] = int(self.tile_sample_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
SCREAMING_SNAKE_CASE : str = []
for i in range(0 , z.shape[2] , a ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
for j in range(0 , z.shape[3] , a ):
SCREAMING_SNAKE_CASE : str = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
SCREAMING_SNAKE_CASE : Optional[int] = self.post_quant_conv(a )
SCREAMING_SNAKE_CASE : List[Any] = self.decoder(a )
row.append(a )
rows.append(a )
SCREAMING_SNAKE_CASE : Any = []
for i, row in enumerate(a ):
SCREAMING_SNAKE_CASE : Optional[int] = []
for j, tile in enumerate(a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE : List[str] = self.blend_v(rows[i - 1][j] , a , a )
if j > 0:
SCREAMING_SNAKE_CASE : List[str] = self.blend_h(row[j - 1] , a , a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a , dim=3 ) )
SCREAMING_SNAKE_CASE : Dict = torch.cat(a , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a )
def __UpperCamelCase ( self : List[Any] , a : torch.FloatTensor , a : bool = False , a : bool = True , a : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = sample
SCREAMING_SNAKE_CASE : Optional[int] = self.encode(a ).latent_dist
if sample_posterior:
SCREAMING_SNAKE_CASE : Optional[int] = posterior.sample(generator=a )
else:
SCREAMING_SNAKE_CASE : Tuple = posterior.mode()
SCREAMING_SNAKE_CASE : List[str] = self.decode(a ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=a )
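The tiled encode/decode above hides seams with the linear crossfade applied by blend_v/blend_h. A 1-D illustration of the same formula, reduced to vectors (all names are local to this sketch):

import torch

def blend_1d(a, b, blend_extent):
    # Crossfade the tail of `a` into the head of `b` over `blend_extent` samples.
    blend_extent = min(a.shape[0], b.shape[0], blend_extent)
    for x in range(blend_extent):
        b[x] = a[-blend_extent + x] * (1 - x / blend_extent) + b[x] * (x / blend_extent)
    return b

print(blend_1d(torch.ones(8), torch.zeros(8), 4))
# tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000, 0.0000, 0.0000, 0.0000])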
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        'prompt',
        'height',
        'width',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        'image',
        'height',
        'width',
        'guidance_scale',
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        'prompt',
        'image',
        'height',
        'width',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        'prompt',
        'image',
        'mask_image',
        'height',
        'width',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        'image',
        'mask_image',
        'height',
        'width',
        'guidance_scale',
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        'example_image',
        'image',
        'mask_image',
        'height',
        'width',
        'guidance_scale',
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        'prompt',
        'audio_length_in_s',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
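These frozensets are consumed by the pipeline test mixins: each pipeline's test class subtracts whatever call arguments it does not support. A one-line illustration, mirroring the SDXL image-to-image test class earlier in this dump:

params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS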
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ = get_logger()
a_ = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : str=None , a : List[Any]=None , **a : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
if isinstance(a , a ):
raise ValueError(
F"Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE : List[str] = device if isinstance(a , a ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : str = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
SCREAMING_SNAKE_CASE : Any = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Any = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a , a ) and column:
if all(
isinstance(a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a , axis=0 )
return column
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a , (str, bytes, type(a )) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , "__array__" ) and not isinstance(a , jax.Array ):
SCREAMING_SNAKE_CASE : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a , map_list=a )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
SCREAMING_SNAKE_CASE : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_arrow_extractor().extract_column(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : Tuple = self.recursive_tensorize(a )
SCREAMING_SNAKE_CASE : Optional[int] = self._consolidate(a )
return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_batch(a )
SCREAMING_SNAKE_CASE : str = self.python_features_decoder.decode_batch(a )
SCREAMING_SNAKE_CASE : List[Any] = self.recursive_tensorize(a )
for column_name in batch:
SCREAMING_SNAKE_CASE : List[Any] = self._consolidate(batch[column_name] )
return batch
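The device bookkeeping in the formatter above reduces to a small jax idiom; a standalone sketch using only real jax API:

import jax

# jax.devices() returns Device objects; str(device) yields the identifier
# the formatter keys its global DEVICE_MAPPING on.
device_mapping = {str(device): device for device in jax.devices()}
default_device = str(jax.devices()[0])
print(default_device, "->", device_mapping[default_device])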
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
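A hypothetical usage sketch (assuming, as reconstructed above, that this is datasets' text-classification TaskTemplate; "review" and "sentiment" are made-up column names for illustration):

template = TextClassification(text_column="review", label_column="sentiment")
# column_mapping renames the user's columns to the schema's canonical names.
assert template.column_mapping == {"review": "text", "sentiment": "labels"}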
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='decision_transformer'
lowerCamelCase__ =['past_key_values']
lowerCamelCase__ ={
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : str , a : List[str]=17 , a : Optional[int]=4 , a : List[str]=128 , a : Union[str, Any]=4096 , a : Union[str, Any]=True , a : Dict=1 , a : Optional[Any]=1024 , a : int=3 , a : Any=1 , a : str=None , a : List[Any]="relu" , a : Optional[Any]=0.1 , a : int=0.1 , a : Dict=0.1 , a : List[str]=1e-5 , a : List[str]=0.02 , a : str=True , a : Any=True , a : Tuple=5_0256 , a : List[str]=5_0256 , a : Dict=False , a : int=False , **a : Any , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = state_dim
SCREAMING_SNAKE_CASE : Optional[Any] = act_dim
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = max_ep_len
SCREAMING_SNAKE_CASE : Any = action_tanh
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Union[str, Any] = n_layer
SCREAMING_SNAKE_CASE : Dict = n_head
SCREAMING_SNAKE_CASE : List[Any] = n_inner
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : List[str] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : Optional[int] = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : Union[str, Any] = bos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = eos_token_id
super().__init__(bos_token_id=a , eos_token_id=a , **a )
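# Minimal sketch (assumed usage, mirroring other HF configs): construct the
# config above with a few overrides and read a mapped alias back through
# `attribute_map`:
#
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4, n_layer=2)
#   assert config.num_hidden_layers == 2   # alias for n_layer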
def lowerCamelCase__ ( _a):
if not isinstance(_a , _a):
SCREAMING_SNAKE_CASE : Tuple = f"Input value of [number={number}] must be an integer"
raise TypeError(_a)
if number < 0:
return False
SCREAMING_SNAKE_CASE : Union[str, Any] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
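# Worked examples for the digit check above (an automorphic number's square
# ends in the number itself). Hedged sketch, assuming a cleaned-up signature
# `is_automorphic(number)` with the same body:
#
#   is_automorphic(5)    # True:  5 * 5     = 25
#   is_automorphic(76)   # True:  76 * 76   = 5776
#   is_automorphic(376)  # True:  376 * 376 = 141376
#   is_automorphic(7)    # False: 49 does not end in 7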
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(a )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self._create_example_records()
SCREAMING_SNAKE_CASE : int = Dataset.from_list(a )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(a ):
self.assertDictEqual(a , example_records[i] )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self._create_example_records()
SCREAMING_SNAKE_CASE : List[str] = Dataset.from_list(a )
SCREAMING_SNAKE_CASE : List[str] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [{"col_1": 1}, {"col_2": "x"}]
SCREAMING_SNAKE_CASE : int = Dataset.from_list(a )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def __UpperCamelCase ( self : Optional[int] ) -> Dict: # checks if the type can be inferred from the second record
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [{"col_1": []}, {"col_1": [1, 2]}]
SCREAMING_SNAKE_CASE : Optional[Any] = Dataset.from_list(a )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = Dataset.from_list([] )
self.assertEqual(len(a ) , 0 )
self.assertListEqual(dset.column_names , [] )
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
def lowerCamelCase__ ( _a , _a):
return 1 if input_a == input_a else 0
def lowerCamelCase__ ( ):
assert xnor_gate(0 , 0) == 1
assert xnor_gate(0 , 1) == 0
assert xnor_gate(1 , 0) == 0
assert xnor_gate(1 , 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
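# Note on the pattern above: `_LazyModule` registers the import structure but
# defers the heavy torch / sentencepiece imports until an attribute is first
# touched, so `import transformers` stays fast. Hedged illustration:
#
#   from transformers import PLBartConfig   # resolved lazily
#   config = PLBartConfig()                 # first use triggers the real import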
from PIL import Image
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = image.size
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Tuple = image.load()
for i in range(_a):
for j in range(_a):
SCREAMING_SNAKE_CASE : Union[str, Any] = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(_a):
for i in range(_a):
SCREAMING_SNAKE_CASE : List[Any] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
a_ = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
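# The function above is a global mean threshold (binarize against the mean
# intensity). A vectorized numpy sketch of the same idea (hedged, assumes a
# grayscale PIL image; not a drop-in replacement):
#
#   import numpy as np
#   arr = np.array(image, dtype=np.uint8)
#   binary = np.where(arr > arr.mean(), 255, 0).astype(np.uint8)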
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def lowerCamelCase__ ( _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ):
if attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Tuple = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
SCREAMING_SNAKE_CASE : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_a)
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_a)
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_a)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[Any] , a : Optional[Any] , a : Any=13 , a : Optional[int]=7 , a : Optional[Any]=True , a : Dict=False , a : List[str]=99 , a : Any=16 , a : Optional[int]=2 , a : Union[str, Any]=4 , a : List[Any]=4 , a : Dict="relu" , a : Any=0.1 , a : Optional[Any]=0.1 , a : str=0.0 , a : List[Any]=0.0 , a : Dict=20 , a : Optional[int]=2 , a : Optional[Any]=1 , a : Any=0 , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : str = seq_length
SCREAMING_SNAKE_CASE : str = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = encoder_layerdrop
SCREAMING_SNAKE_CASE : int = decoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = eos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Any = self.eos_token_id # Eos Token
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE : int = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE : str = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
SCREAMING_SNAKE_CASE : str = prepare_mam_aaa_inputs_dict(a , a , a )
return config, inputs_dict
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCamelCase ( self : Dict , a : Tuple , a : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = MaMaaaModel(config=a ).get_decoder().to(a ).eval()
SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"]
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a , attention_mask=a , head_mask=a , use_cache=a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE : int = model(a , attention_mask=a )["last_hidden_state"]
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , past_key_values=a )[
"last_hidden_state"
]
# select random slice
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-2 ) )
def __UpperCamelCase ( self : Any , a : Any , a : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = MaMaaaModel(config=a ).to(a ).eval()
SCREAMING_SNAKE_CASE : str = model(**a )
SCREAMING_SNAKE_CASE : Dict = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = model.get_encoder()
encoder.save_pretrained(a )
SCREAMING_SNAKE_CASE : Dict = MaMaaaEncoder.from_pretrained(a ).to(a )
SCREAMING_SNAKE_CASE : Optional[Any] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = model.get_decoder()
decoder.save_pretrained(a )
SCREAMING_SNAKE_CASE : str = MaMaaaDecoder.from_pretrained(a ).to(a )
SCREAMING_SNAKE_CASE : Dict = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=a , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _UpperCamelCase ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowerCamelCase__ =(
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : List[Any] , a : Tuple , a : int , a : Optional[Any] , a : Optional[Any] , a : Tuple ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = MaMaaaModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a , output_loading_info=a )
self.assertEqual(info["missing_keys"] , [] )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE : List[str] = model_class(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self._prepare_for_class(a , a ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE : str = inputs["input_ids"]
SCREAMING_SNAKE_CASE : Optional[Any] = inputs.get("decoder_input_ids" , a )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , a )
SCREAMING_SNAKE_CASE : Any = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Tuple = wte(a )
else:
SCREAMING_SNAKE_CASE : List[str] = wte(a )
SCREAMING_SNAKE_CASE : Dict = wte(a )
with torch.no_grad():
model(**a )[0]
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Optional[int] = input_dict["input_ids"]
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.ne(1 ).to(a )
SCREAMING_SNAKE_CASE : str = MaMaaaForConditionalGeneration(a ).eval().to(a )
if torch_device == "cuda":
model.half()
model.generate(a , attention_mask=a )
model.generate(num_beams=4 , do_sample=a , early_stopping=a , num_return_sequences=3 )
def lowerCamelCase__ ( _a):
return torch.tensor(_a , dtype=torch.long , device=_a)
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(a )
SCREAMING_SNAKE_CASE : List[str] = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
SCREAMING_SNAKE_CASE : Union[str, Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
SCREAMING_SNAKE_CASE : Any = prepare_mam_aaa_inputs_dict(model.config , a , a )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , a )
# change to expected output here
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=a )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=a ) )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a )
# change to intended input
SCREAMING_SNAKE_CASE : str = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
SCREAMING_SNAKE_CASE : Dict = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
SCREAMING_SNAKE_CASE : Any = prepare_mam_aaa_inputs_dict(model.config , a , a )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , a )
# change to expected output here
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=a )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=a ) )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a )
SCREAMING_SNAKE_CASE : Tuple = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
SCREAMING_SNAKE_CASE : str = [
"L'affaire NSA souligne l'absence totale de dรฉbat sur le renseignement",
"Selon moi, il y a deux niveaux de rรฉponse de la part du gouvernement franรงais.",
"Lorsque Franรงois Hollande tรฉlรฉphone ร Barack Obama ou quand le ministre des affaires รฉtrangรจres Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils rรฉagissent ร une vraie dรฉcouverte, qui est celle de"
" l'ampleur de la surveillance amรฉricaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(a , padding=a , return_tensors="pt" )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
input_ids=dct["input_ids"].to(a ) , attention_mask=dct["attention_mask"].to(a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
SCREAMING_SNAKE_CASE : str = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When Franรงois Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=a , skip_special_tokens=a )
assert generated == expected_en
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = 0
while b > 0:
if b & 1:
SCREAMING_SNAKE_CASE : Optional[Any] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
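# Both helpers above implement double-and-add ("Russian peasant")
# multiplication; the second reduces modulo c at every step so intermediates
# stay bounded. Worked example (hedged, plain arithmetic):
#
#   13 * 11: 11 = 0b1011, so res = 13 + 26 + 104 = 143
#   13 * 11 mod 7: same accumulation reduced mod 7, giving 143 mod 7 = 3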
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[Any] = torch.load(_a , map_location="cpu")
if "model" in sd.keys():
SCREAMING_SNAKE_CASE : Any = torch.load(_a , map_location="cpu")["model"]
# pop unnecessary weights
SCREAMING_SNAKE_CASE : str = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_a)
SCREAMING_SNAKE_CASE : Any = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE : Union[str, Any] = sd.pop(_a)
SCREAMING_SNAKE_CASE : List[Any] = list(sd.keys())
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE : Tuple = key.replace(".qkv_proj." , ".q_proj.")
SCREAMING_SNAKE_CASE : str = key.replace(".qkv_proj." , ".k_proj.")
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(".qkv_proj." , ".v_proj.")
SCREAMING_SNAKE_CASE : int = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = torch.split(_a , depth // 3 , dim=0)
SCREAMING_SNAKE_CASE : str = q
SCREAMING_SNAKE_CASE : Optional[int] = k
SCREAMING_SNAKE_CASE : List[str] = v
del sd[key]
return sd
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a=None):
SCREAMING_SNAKE_CASE : str = load_checkpoint(_a)
if config is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = OPTConfig.from_pretrained(_a)
else:
SCREAMING_SNAKE_CASE : str = OPTConfig()
SCREAMING_SNAKE_CASE : Tuple = OPTModel(_a).half().eval()
model.load_state_dict(_a)
# Check results
Path(_a).mkdir(exist_ok=_a)
model.save_pretrained(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
a_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
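# Example invocation of the conversion script above (hedged: the script file
# name and the paths are placeholders, not real checkpoints):
#
#   python convert_opt_checkpoint.py \
#       --fairseq_path ./opt-125m/restored.pt \
#       --pytorch_dump_folder_path ./opt-125m-hf \
#       --hf_config facebook/opt-125m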
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='roformer'
def __init__( self : Dict , a : Any=5_0000 , a : List[Any]=None , a : str=768 , a : str=12 , a : Tuple=12 , a : Optional[Any]=3072 , a : List[str]="gelu" , a : List[Any]=0.1 , a : Union[str, Any]=0.1 , a : Tuple=1536 , a : List[str]=2 , a : Tuple=0.02 , a : Any=1e-12 , a : Optional[int]=0 , a : Union[str, Any]=False , a : int=True , **a : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = rotary_value
SCREAMING_SNAKE_CASE : int = use_cache
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "sequence"}
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
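# Sketch of what the OnnxConfig above drives (hedged; exporter details vary
# across transformers versions, and the class name below is assumed). Its
# `inputs` property marks batch and sequence as dynamic axes for
# input_ids / attention_mask / token_type_ids:
#
#   config = RoFormerConfig()          # defaults as defined above
#   onnx_config = RoFormerOnnxConfig(config)   # assumed name
#   dynamic = onnx_config.inputs       # OrderedDict of dynamic-axis mappings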
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( _a):
# getting number of pixels in the image
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(_a):
for j in range(_a):
SCREAMING_SNAKE_CASE : Dict = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
a_ = imread('image_data/lena.jpg', 1)
# convert to its negative
a_ = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
a_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : List[Any] = BertAbsConfig(
temp_dir="." , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE : Dict = torch.load(_a , lambda _a , _a: storage)
SCREAMING_SNAKE_CASE : str = AbsSummarizer(_a , torch.device("cpu") , _a)
original.eval()
SCREAMING_SNAKE_CASE : List[str] = BertAbsSummarizer(_a , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained("bert-base-uncased")
# prepare the model inputs
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("This is sample รฉร alj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("This is sample 3 รฉร alj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[Any] = encoder_input_ids
SCREAMING_SNAKE_CASE : List[Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : Optional[int] = original(_a , _a , _a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Dict = original.generator(_a)
SCREAMING_SNAKE_CASE : Any = new_model(
_a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Tuple = new_model.generator(_a)
SCREAMING_SNAKE_CASE : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : int = torch.allclose(_a , _a , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
a_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
) | 25 | 1 |
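For reference, a hypothetical invocation of the conversion script above (the script name and both paths are placeholders, not values taken from this dump):

# python convert_bertabs_original_pytorch_checkpoint.py \
#     --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#     --pytorch_dump_folder_path ./bertabs-converted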
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
a_ = random.Random()
def lowerCamelCase__ ( _a , _a=1.0 , _a=None , _a=None):
if rng is None:
SCREAMING_SNAKE_CASE : Optional[Any] = global_rng
SCREAMING_SNAKE_CASE : Optional[int] = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
@require_torchaudio
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , a : Optional[Any] , a : int=7 , a : int=400 , a : Optional[Any]=2000 , a : List[str]=10 , a : List[str]=160 , a : str=8 , a : Optional[int]=0.0 , a : Optional[Any]=4000 , a : Union[str, Any]=False , a : int=True , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Dict = min_seq_length
SCREAMING_SNAKE_CASE : int = max_seq_length
SCREAMING_SNAKE_CASE : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : List[Any] = padding_value
SCREAMING_SNAKE_CASE : Dict = sampling_rate
SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
SCREAMING_SNAKE_CASE : Tuple = do_normalize
SCREAMING_SNAKE_CASE : Tuple = feature_size
SCREAMING_SNAKE_CASE : List[str] = chunk_length
SCREAMING_SNAKE_CASE : List[str] = hop_length
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self : Union[str, Any] , a : List[Any]=False , a : int=False ) -> Dict:
"""simple docstring"""
def _flatten(a : Optional[Any] ):
return list(itertools.chain(*a ) )
if equal_length:
SCREAMING_SNAKE_CASE : int = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Dict = [np.asarray(a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =WhisperFeatureExtractor if is_speech_available() else None
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = WhisperFeatureExtractionTester(self )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(a )[0]
check_json_file_has_correct_format(a )
SCREAMING_SNAKE_CASE : str = self.feature_extraction_class.from_pretrained(a )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(a , "feat_extract.json" )
feat_extract_first.to_json_file(a )
SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class.from_json_file(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Tuple = feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Any = [np.asarray(a ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(a , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
SCREAMING_SNAKE_CASE : int = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(a , a , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(a , return_tensors="np" ).input_features
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(a , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Tuple = np.asarray(a )
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(a , return_tensors="np" ).input_features
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(a , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1e-3 ) )
# Test truncation required
SCREAMING_SNAKE_CASE : str = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
SCREAMING_SNAKE_CASE : List[Any] = [np.asarray(a ) for speech_input in speech_inputs]
SCREAMING_SNAKE_CASE : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
SCREAMING_SNAKE_CASE : int = [np.asarray(a ) for speech_input in speech_inputs_truncated]
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(a , return_tensors="np" ).input_features
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(a , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1e-3 ) )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE : str = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __UpperCamelCase ( self : Any , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("id" ).select(range(a ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
SCREAMING_SNAKE_CASE : Optional[int] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : List[Any] = WhisperFeatureExtractor()
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(a , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , a , atol=1e-4 ) )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : int = self._load_datasamples(1 )[0]
SCREAMING_SNAKE_CASE : Tuple = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
SCREAMING_SNAKE_CASE : int = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=a )[0]
self.assertTrue(np.all(np.mean(a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a ) - 1 ) < 1e-3 ) ) | 25 |
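A minimal stand-alone sketch of the extractor these tests exercise, using synthetic audio; the expected shape follows the `(1, 80, 3000)` assertion above, and 16 kHz is WhisperFeatureExtractor's default sampling rate:

import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
audio = np.random.randn(16_000).astype(np.float32)  # one second of noise at 16 kHz
features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000): 80 log-mel bins over a fixed 30 s window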
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
a_ = parser.parse_args()
    a_ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
a_ = CLIPImageProcessor()
a_ = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
a_ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path) | 25 | 1 |
import mpmath # for roots of unity
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , a : Any=None , a : List[Any]=None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE : str = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE : Dict = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE : str = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
        # A complex root of unity used for the Fourier transform
SCREAMING_SNAKE_CASE : Any = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE : Any = self.__multiply()
def __UpperCamelCase ( self : int , a : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(a ) <= 1:
return dft[0]
#
SCREAMING_SNAKE_CASE : int = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = [[] for i in range(a )]
SCREAMING_SNAKE_CASE : Optional[Any] = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(a ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE : List[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(a ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE : Any = new_dft
SCREAMING_SNAKE_CASE : Union[str, Any] = next_ncol // 2
return dft[0]
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.__dft("A" )
SCREAMING_SNAKE_CASE : int = self.__dft("B" )
SCREAMING_SNAKE_CASE : Optional[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE : List[str] = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE : Any = [[] for i in range(a )]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE : List[str] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE : List[str] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = "A = " + " + ".join(
F"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE : Tuple = "B = " + " + ".join(
F"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE : List[Any] = "A*B = " + " + ".join(
F"{coef}*x^{i}" for coef, i in enumerate(self.product ) )
return F"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 |
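A minimal usage sketch for the polynomial-multiplication class above. Note that identifiers in this dump are mangled (the class appears as `_UpperCamelCase` and its parameter names are lost), so positional arguments are used; coefficients are assumed to be in ascending order of power, matching how `__init__` reads them:

# (3 + 2x) * (1 + 4x) = 3 + 14x + 8x^2
fft_product = _UpperCamelCase([3, 2], [1, 4])
print(fft_product)  # __str__ prints A, B and the product A*B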
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ = get_logger()
a_ = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : str=None , a : List[Any]=None , **a : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
if isinstance(a , a ):
raise ValueError(
F"Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE : List[str] = device if isinstance(a , a ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle`
        # or `dill`, so we keep the device mapping in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : str = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
SCREAMING_SNAKE_CASE : Any = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Any = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a , a ) and column:
if all(
isinstance(a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a , axis=0 )
return column
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a , (str, bytes, type(a )) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
SCREAMING_SNAKE_CASE : Dict = {"dtype": jnp.intaa}
else:
SCREAMING_SNAKE_CASE : str = {"dtype": jnp.intaa}
elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE : int = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle`
        # or `dill`, so we keep the device mapping in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , "__array__" ) and not isinstance(a , jax.Array ):
SCREAMING_SNAKE_CASE : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a , map_list=a )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
SCREAMING_SNAKE_CASE : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_arrow_extractor().extract_column(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : Tuple = self.recursive_tensorize(a )
SCREAMING_SNAKE_CASE : Optional[int] = self._consolidate(a )
return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_batch(a )
SCREAMING_SNAKE_CASE : str = self.python_features_decoder.decode_batch(a )
SCREAMING_SNAKE_CASE : List[Any] = self.recursive_tensorize(a )
for column_name in batch:
SCREAMING_SNAKE_CASE : List[Any] = self._consolidate(batch[column_name] )
return batch | 25 |
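For orientation, a minimal sketch of how a formatter like this is reached in practice through the `datasets` API (assuming the class above corresponds to the library's JAX formatter; the toy data is illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3]})
ds = ds.with_format("jax")  # rows and columns now come back as jax.Array values
print(ds[0]["x"])           # e.g. Array(1, dtype=int32)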
from math import pi, sqrt, tan
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values")
return 6 * side_length**2
def lowerCamelCase__ ( _a , _a , _a):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values")
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values")
return 4 * pi * radius**2
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values")
return 3 * pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values")
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase__ ( _a , _a , _a):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values")
SCREAMING_SNAKE_CASE : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values")
return 2 * pi * radius * (height + radius)
def lowerCamelCase__ ( _a , _a):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values")
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori")
return 4 * pow(_a , 2) * torus_radius * tube_radius
def lowerCamelCase__ ( _a , _a):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values")
return length * width
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values")
return side_length**2
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values")
return (base * height) / 2
def lowerCamelCase__ ( _a , _a , _a):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values")
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle")
SCREAMING_SNAKE_CASE : List[str] = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : Optional[int] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea))
return area
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values")
return base * height
def lowerCamelCase__ ( _a , _a , _a):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values")
return 1 / 2 * (basea + basea) * height
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values")
return pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values")
return pi * radius_x * radius_y
def lowerCamelCase__ ( _a , _a):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values")
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase__ ( _a , _a):
if not isinstance(_a , _a) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side")
return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowerCamelCase__ ( _a):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Dict = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE : Tuple = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head")
SCREAMING_SNAKE_CASE : Dict = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head")
SCREAMING_SNAKE_CASE : List[str] = key.replace("heads.cmd.itm_head.cls" , "itm_head")
SCREAMING_SNAKE_CASE : List[Any] = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler")
SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale")
SCREAMING_SNAKE_CASE : Dict = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head")
SCREAMING_SNAKE_CASE : str = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head")
SCREAMING_SNAKE_CASE : str = key.replace("mm_text_projection" , "flava.text_to_mm_projection")
SCREAMING_SNAKE_CASE : List[Any] = key.replace("mm_image_projection" , "flava.image_to_mm_projection")
SCREAMING_SNAKE_CASE : Optional[int] = key.replace("image_encoder.module" , "flava.image_model")
SCREAMING_SNAKE_CASE : Tuple = key.replace("text_encoder.module" , "flava.text_model")
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token")
SCREAMING_SNAKE_CASE : Dict = key.replace("mm_encoder.module" , "flava.multimodal_model")
SCREAMING_SNAKE_CASE : int = key.replace("text_projection" , "flava.text_projection")
SCREAMING_SNAKE_CASE : List[str] = key.replace("image_projection" , "flava.image_projection")
SCREAMING_SNAKE_CASE : Union[str, Any] = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE : Tuple = value
return upgrade
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a , _a=None):
if config_path is not None:
SCREAMING_SNAKE_CASE : Any = FlavaConfig.from_pretrained(_a)
else:
SCREAMING_SNAKE_CASE : Tuple = FlavaConfig()
SCREAMING_SNAKE_CASE : Union[str, Any] = FlavaForPreTraining(_a).eval()
SCREAMING_SNAKE_CASE : int = convert_dalle_checkpoint(_a , _a , save_checkpoint=_a)
if os.path.exists(_a):
SCREAMING_SNAKE_CASE : List[Any] = torch.load(_a , map_location="cpu")
else:
SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(_a , map_location="cpu")
SCREAMING_SNAKE_CASE : Tuple = upgrade_state_dict(_a , _a)
hf_model.load_state_dict(_a)
SCREAMING_SNAKE_CASE : List[Any] = hf_model.state_dict()
SCREAMING_SNAKE_CASE : Optional[int] = count_parameters(_a)
SCREAMING_SNAKE_CASE : Union[str, Any] = count_parameters(_a) + count_parameters(_a)
assert torch.allclose(_a , _a , atol=1E-3)
hf_model.save_pretrained(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a_ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path) | 25 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 1 |
import argparse
import datetime
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[str] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
SCREAMING_SNAKE_CASE : List[str] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(_a) < 11:
raise ValueError("Must be 10 characters long")
# Get month
SCREAMING_SNAKE_CASE : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12")
SCREAMING_SNAKE_CASE : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get day
SCREAMING_SNAKE_CASE : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31")
# Get second separator
SCREAMING_SNAKE_CASE : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get year
SCREAMING_SNAKE_CASE : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?")
# Get datetime obj for validation
SCREAMING_SNAKE_CASE : Dict = datetime.date(int(_a) , int(_a) , int(_a))
# Start math
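    # Zeller's congruence (Gregorian calendar):
    #   f = (d + floor(2.6*m - 5.39) + k + floor(k/4) + floor(c/4) - 2*c) mod 7
    # where c is the century part and k the year-of-century part of the
    # (possibly shifted) year; Jan/Feb are treated as months 13/14 of the
    # previous year by the adjustment below.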
if m <= 2:
SCREAMING_SNAKE_CASE : List[str] = y - 1
SCREAMING_SNAKE_CASE : int = m + 12
# maths var
SCREAMING_SNAKE_CASE : int = int(str(_a)[:2])
SCREAMING_SNAKE_CASE : int = int(str(_a)[2:])
SCREAMING_SNAKE_CASE : int = int(2.6 * m - 5.39)
SCREAMING_SNAKE_CASE : int = int(c / 4)
SCREAMING_SNAKE_CASE : int = int(k / 4)
SCREAMING_SNAKE_CASE : int = int(d + k)
SCREAMING_SNAKE_CASE : int = int(t + u + v + x)
SCREAMING_SNAKE_CASE : int = int(z - (2 * c))
SCREAMING_SNAKE_CASE : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer.")
# Response
SCREAMING_SNAKE_CASE : str = f"Your date {date_input}, is a {days[str(_a)]}!"
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
a_ = parser.parse_args()
zeller(args.date_input) | 25 |
from __future__ import annotations
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_a)
if n > 1:
factors.append(_a)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ = logging.getLogger(__name__)
a_ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase__ =field(
default=__A , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__A )} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase__ =field(
default=__A , metadata={'help': 'The input training data file (a text file).'} )
lowerCamelCase__ =field(
default=__A , metadata={
'help': (
'The input training data files (multiple files in glob format). '
            'Very often splitting large files into smaller ones can prevent the tokenizer from going out of memory'
)
} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    lowerCamelCase__ =field(default=__A , metadata={'help': 'Whether or not to use whole word masking.'} )
lowerCamelCase__ =field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
lowerCamelCase__ =field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
lowerCamelCase__ =field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
lowerCamelCase__ =field(
default=-1 , metadata={
'help': (
            'Optional input sequence length after tokenization. '
            'The training dataset will be truncated in blocks of this size for training. '
            'Defaults to the model max input length for single sentence inputs (taking into account special tokens).'
)
} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowerCamelCase__ ( _a , _a , _a = False , _a = None , ):
def _dataset(_a , _a=None):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
return LineByLineWithRefDataset(
tokenizer=_a , file_path=_a , block_size=args.block_size , ref_path=_a , )
return LineByLineTextDataset(tokenizer=_a , file_path=_a , block_size=args.block_size)
else:
return TextDataset(
tokenizer=_a , file_path=_a , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_a , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file)
elif args.train_data_files:
return ConcatDataset([_dataset(_a) for f in glob(args.train_data_files)])
else:
return _dataset(args.train_data_file , args.train_ref_file)
def lowerCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument.")
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _a)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name")
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Tuple = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=_a , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch")
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelWithLMHead.from_config(_a)
model.resize_token_embeddings(len(_a))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling).")
if data_args.block_size <= 0:
SCREAMING_SNAKE_CASE : str = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
SCREAMING_SNAKE_CASE : List[str] = min(data_args.block_size , tokenizer.max_len)
# Get datasets
SCREAMING_SNAKE_CASE : List[Any] = (
get_dataset(_a , tokenizer=_a , cache_dir=model_args.cache_dir) if training_args.do_train else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
get_dataset(_a , tokenizer=_a , evaluate=_a , cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
SCREAMING_SNAKE_CASE : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=_a , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
SCREAMING_SNAKE_CASE : Optional[int] = DataCollatorForWholeWordMask(
tokenizer=_a , mlm_probability=data_args.mlm_probability)
else:
SCREAMING_SNAKE_CASE : Any = DataCollatorForLanguageModeling(
tokenizer=_a , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
SCREAMING_SNAKE_CASE : str = Trainer(
model=_a , args=_a , data_collator=_a , train_dataset=_a , eval_dataset=_a , prediction_loss_only=_a , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : Optional[int] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=_a)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
SCREAMING_SNAKE_CASE : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : Any = math.exp(eval_output["eval_loss"])
SCREAMING_SNAKE_CASE : Any = {"perplexity": perplexity}
SCREAMING_SNAKE_CASE : Dict = os.path.join(training_args.output_dir , "eval_results_lm.txt")
if trainer.is_world_master():
with open(_a , "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s" , _a , str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
results.update(_a)
return results
def lowerCamelCase__ ( _a):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 25 |
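A hypothetical invocation of the language-modeling fine-tuning script above (the script name, model name, and file paths are assumptions, shown only to illustrate the argument surface defined by the dataclasses):

# python run_language_modeling.py \
#     --model_name_or_path roberta-base --mlm \
#     --train_data_file ./train.txt --eval_data_file ./eval.txt \
#     --do_train --do_eval --output_dir ./lm-output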
from math import factorial, pi
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_sin() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : int = float(_a)
SCREAMING_SNAKE_CASE : Dict = theta // (2 * pi)
theta -= 2 * div * pi
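    # Maclaurin series: sin(theta) = sum_{r>=0} (-1)^r * theta^(2r+1) / (2r+1)!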
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(_a))
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_cos() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : str = float(_a)
SCREAMING_SNAKE_CASE : Any = theta // (2 * pi)
theta -= 2 * div * pi
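    # Maclaurin series: cos(theta) = sum_{r>=0} (-1)^r * theta^(2r) / (2r)!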
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(_a))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15)) | 25 | 1 |
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if len(snake_case ) != len(snake_case ):
raise ValueError('''The length of profit and weight must be same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
    # Compute profit per unit of weight (the profit/weight ratio) for each item.
__magic_name__ :List[Any] = [p / w for p, w in zip(snake_case, snake_case )]
# Creating a copy of the list and sorting profit/weight in ascending order
__magic_name__ :Tuple = sorted(snake_case )
# declaring useful variables
__magic_name__ :str = len(snake_case )
__magic_name__ :List[str] = 0
__magic_name__ :List[str] = 0
__magic_name__ :Union[str, Any] = 0
    # loop while the total taken weight stays within max_weight and items remain (i < length)
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
__magic_name__ :List[Any] = sorted_profit_by_weight[length - i - 1]
__magic_name__ :int = profit_by_weight.index(snake_case )
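        # overwrite the used ratio with -1 (in the un-mangled source this is
        # profit_by_weight[index] = -1) so .index() does not match the same
        # or a duplicate ratio again on later iterations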
__magic_name__ :Any = -1
        # check whether the remaining capacity can hold the whole item
if max_weight - limit >= weight[index]:
limit += weight[index]
            # take the whole item: fraction = weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit entirely: take only the remaining capacity
            # and gain the proportional fraction of the item's profit.
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = [int(x) for x in input("""Input profits separated by spaces: """).split()]
SCREAMING_SNAKE_CASE__ : Tuple = [int(x) for x in input("""Input weights separated by spaces: """).split()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 0 |
from __future__ import annotations
import math
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = size
        # a segment tree over `size` leaves needs at most about 4 * size nodes
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
SCREAMING_SNAKE_CASE : Union[str, Any] = [0 for i in range(0 , 4 * size )]
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def __UpperCamelCase ( self : Tuple , a : int ) -> int:
"""simple docstring"""
return idx * 2
def __UpperCamelCase ( self : str , a : int ) -> int:
"""simple docstring"""
return idx * 2 + 1
def __UpperCamelCase ( self : int , a : int , a : int , a : int , a : list[int] ) -> None:
"""simple docstring"""
if left_element == right_element:
SCREAMING_SNAKE_CASE : int = a[left_element - 1]
else:
SCREAMING_SNAKE_CASE : Optional[int] = (left_element + right_element) // 2
self.build(self.left(a ) , a , a , a )
self.build(self.right(a ) , mid + 1 , a , a )
SCREAMING_SNAKE_CASE : List[Any] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : int , a : int , a : int , a : int , a : int ) -> bool:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : Any = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[str] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
SCREAMING_SNAKE_CASE : Optional[Any] = val
if left_element != right_element:
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
return True
SCREAMING_SNAKE_CASE : int = (left_element + right_element) // 2
self.update(self.left(a ) , a , a , a , a , a )
self.update(self.right(a ) , mid + 1 , a , a , a , a )
SCREAMING_SNAKE_CASE : Optional[int] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
return True
def __UpperCamelCase ( self : Dict , a : int , a : int , a : int , a : int , a : int ) -> int | float:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
SCREAMING_SNAKE_CASE : Dict = (left_element + right_element) // 2
SCREAMING_SNAKE_CASE : Tuple = self.query(self.left(a ) , a , a , a , a )
SCREAMING_SNAKE_CASE : Tuple = self.query(self.right(a ) , mid + 1 , a , a , a )
return max(a , a )
def __str__( self : str ) -> str:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , a , a ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
a_ = 15
a_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt) | 25 | 0 |
def _A ( _lowercase ) -> list:
"""simple docstring"""
__UpperCamelCase = len(_lowercase )
for i in range(1 , _lowercase ):
__UpperCamelCase = collection[i]
__UpperCamelCase = 0
__UpperCamelCase = i - 1
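        # binary-search the sorted prefix collection[:i] for val's insertion point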
while low <= high:
__UpperCamelCase = (low + high) // 2
if val < collection[mid]:
__UpperCamelCase = mid - 1
else:
__UpperCamelCase = mid + 1
for j in range(_lowercase , _lowercase , -1 ):
__UpperCamelCase = collection[j - 1]
__UpperCamelCase = val
return collection
if __name__ == "__main__":
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : Optional[int] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
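# The three tests above share one idiom: fix the RNG seed, take a 3x3 corner
# slice of the generated image, and compare it against hard-coded reference
# values. A small helper capturing that pattern (a sketch, not part of the
# test suite; `atol` mirrors the per-test tolerances):
import numpy as np


def assert_image_slice_close(image, expected_slice, atol=1e-2):
    """image: (batch, H, W, C) array; expected_slice: 9 reference floats."""
    image_slice = image[0, -3:, -3:, -1].flatten()
    max_diff = np.abs(image_slice - np.asarray(expected_slice)).max()
    assert max_diff < atol, f"max deviation {max_diff:.4f} exceeds tolerance {atol}"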
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : torch.FloatTensor
a__ : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class lowerCamelCase__ ( _A , _A):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , __lowerCAmelCase : int = 10_00 , __lowerCAmelCase : str = "fixed_small_log" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[float] = 1.0 , __lowerCAmelCase : str = "epsilon" , __lowerCAmelCase : str = "squaredcos_cap_v2" , ) -> Optional[int]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
_A = betas_for_alpha_bar(__lowerCAmelCase )
_A = 1.0 - self.betas
_A = torch.cumprod(self.alphas , dim=0 )
_A = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_A = 1.0
# setable values
_A = None
_A = torch.from_numpy(np.arange(0 , __lowerCAmelCase )[::-1].copy() )
_A = variance_type
def snake_case_ ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
return sample
def snake_case_ ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, torch.device] = None ) -> Optional[Any]:
_A = num_inference_steps
_A = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_A = (np.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_A = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def snake_case_ ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Tuple=None ) -> List[str]:
if prev_timestep is None:
_A = t - 1
_A = self.alphas_cumprod[t]
_A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_A = 1 - alpha_prod_t
_A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_A = self.betas[t]
else:
_A = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance ฮฒt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_A = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_A = torch.log(torch.clamp(__lowerCAmelCase , min=1E-20 ) )
_A = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_A = variance.log()
_A = beta.log()
_A = (predicted_variance + 1) / 2
_A = frac * max_log + (1 - frac) * min_log
return variance
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
_A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_A , _A = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 )
else:
_A = None
# 1. compute alphas, betas
if prev_timestep is None:
_A = t - 1
_A = self.alphas_cumprod[t]
_A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_A = 1 - alpha_prod_t
_A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_A = self.betas[t]
_A = self.alphas[t]
else:
_A = 1 - alpha_prod_t / alpha_prod_t_prev
_A = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A = torch.clamp(
__lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_A = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample ยต_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_A = 0
if t > 0:
_A = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase , device=model_output.device )
_A = self._get_variance(
__lowerCAmelCase , predicted_variance=__lowerCAmelCase , prev_timestep=__lowerCAmelCase , )
if self.variance_type == "fixed_small_log":
_A = variance
elif self.variance_type == "learned_range":
_A = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
_A = variance * variance_noise
_A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
def snake_case_ ( self : List[str] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.IntTensor , ) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
_A = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
_A = timesteps.to(original_samples.device )
_A = alphas_cumprod[timesteps] ** 0.5
_A = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_A = sqrt_alpha_prod.unsqueeze(-1 )
_A = (1 - alphas_cumprod[timesteps]) ** 0.5
_A = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_A = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_A = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
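# Sanity check of the cosine schedule above (a sketch; runs at import): the
# cumulative product of alphas should decay from ~1 at step 0 to ~0 at the
# final step, i.e. the forward process ends in (almost) pure noise.
_betas = betas_for_alpha_bar(1000)
_alphas_cumprod = torch.cumprod(1.0 - _betas, dim=0)
assert _alphas_cumprod[0] > 0.99 and _alphas_cumprod[-1] < 1e-3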
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
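# Note on the provider configuration used above: onnxruntime accepts
# (provider_name, options) tuples, so GPU memory limits and arena behaviour
# can be set per session. A hedged sketch ("model.onnx" is a placeholder):
# import onnxruntime as ort
# options = ort.SessionOptions()
# session = ort.InferenceSession(
#     "model.onnx",
#     sess_options=options,
#     providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})],
# )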
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase):
lowerCAmelCase_ = AudioLDMPipeline
lowerCAmelCase_ = TEXT_TO_AUDIO_PARAMS
lowerCAmelCase_ = TEXT_TO_AUDIO_BATCH_PARAMS
lowerCAmelCase_ = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=A_ , )
UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
UpperCamelCase = ClapTextModelWithProjection(A_ )
UpperCamelCase = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77 )
UpperCamelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=A_ , )
UpperCamelCase = SpeechTaHifiGan(A_ )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCAmelCase_ ( self , A_ , A_=0 )-> Optional[int]:
'''simple docstring'''
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs['prompt']]
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs.pop('prompt' )]
UpperCamelCase = audioldm_pipe.tokenizer(
A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = text_inputs['input_ids'].to(A_ )
UpperCamelCase = audioldm_pipe.text_encoder(
A_ , )
UpperCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(A_ , dim=-1 )
UpperCamelCase = prompt_embeds
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * ['this is a negative prompt']
UpperCamelCase = negative_prompt
UpperCamelCase = 3 * [inputs['prompt']]
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs.pop('prompt' )]
UpperCamelCase = []
for p in [prompt, negative_prompt]:
UpperCamelCase = audioldm_pipe.tokenizer(
A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = text_inputs['input_ids'].to(A_ )
UpperCamelCase = audioldm_pipe.text_encoder(
A_ , )
UpperCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(A_ , dim=-1 )
embeds.append(A_ )
UpperCamelCase , UpperCamelCase = embeds
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 'egg cracking'
UpperCamelCase = audioldm_pipe(**A_ , negative_prompt=A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.016
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.032
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = ['hey']
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
assert audio_shape == (1, 256)
UpperCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase = SpeechTaHifiGan(A_ ).to(A_ )
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) )
UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
UpperCamelCase = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_inputs(A_ )
UpperCamelCase = 25
UpperCamelCase = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 81920
UpperCamelCase = audio[77230:77240]
UpperCamelCase = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_inputs(A_ )
UpperCamelCase = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 81920
UpperCamelCase = audio[27780:27790]
UpperCamelCase = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
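# A hedged, minimal usage sketch of the pipeline exercised above (the
# cvssp/audioldm checkpoint is downloaded at runtime; a GPU is assumed for
# reasonable speed):
# from diffusers import AudioLDMPipeline
# pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
# audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
# # `audio` is a 1-D numpy waveform sampled at pipe.vocoder.config.sampling_rate Hz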
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)

_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]

_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]

_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
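# Quick standalone illustration of the _run_operation contract relied on
# above: (result, None) on success, (None, exception) on failure, so results
# and raised errors can be compared uniformly across both containers.
assert _run_operation({}, setitem, 'k', 'v') == (None, None)  # setitem returns None
_res, _exc = _run_operation({}, getitem, 'absent')
assert _res is None and isinstance(_exc, KeyError)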
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden/underscore directories and the scripts folder in place
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ''
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(' ', '%20')
        filename = os.path.splitext(filename.replace('_', ' ').title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
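# Illustrative output for a hypothetical tree containing sorts/bubble_sort.py
# and sorts/merge_sort.py under the current directory:
#
# ## Sorts
#   * [Bubble Sort](sorts/bubble_sort.py)
#   * [Merge Sort](sorts/merge_sort.py)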
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
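# A minimal standalone sketch of the lazy-import pattern behind _LazyModule,
# using only the standard library (LazyModule here is illustrative, not the
# transformers implementation): the heavy submodule is imported only when one
# of its symbols is first accessed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache, so __getattr__ is only hit once per symbol
        return value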
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
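# Worked example (the classic itinerary for this problem): with travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] for 1-/7-/30-day passes, the
# optimum is 2 + 7 + 2 = 11 (day pass on day 1, week pass covering days 4-8,
# day pass on day 20).
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11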
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
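# Design note: with k = max_value - min_value + 1 buckets, distribution takes
# O(n + k); each bucket then only holds values that map to the same slot, so
# the per-bucket sorts stay cheap unless many values collide. For integer
# keys over a small range this behaves much like a stable counting sort.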
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
a_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset([])
a_ = frozenset(['image'])
a_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image'])
a_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'negative_prompt'])
a_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
a_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image', 'mask_image'])
a_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['example_image', 'image', 'mask_image'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset(['input_tokens'])
a_ = frozenset(['input_tokens'])
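# A hedged sketch of how parameter sets like those above are consumed by the
# pipeline test mixins: assert that every required call argument is accepted
# by the pipeline's __call__ signature (`pipeline_class` is a placeholder for
# any pipeline class under test).
import inspect


def check_required_call_params(pipeline_class, required_params):
    accepted = set(inspect.signature(pipeline_class.__call__).parameters)
    missing = set(required_params) - accepted
    assert not missing, f"__call__ is missing expected parameters: {missing}"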
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # positions where both bits are 1 generate a carry
        first ^= second         # bitwise sum, ignoring carries
        second = carry << 1     # carries shift one position to the left
    return first
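# Worked trace for add(5, 9): 5 = 0b0101, 9 = 0b1001.
#   step 1: carry = 0b0001, first = 0b1100, second = 0b0010
#   step 2: carry = 0b0000, first = 0b1110, second = 0b0000 -> loop ends
assert add(5, 9) == 14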
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ = get_logger()
a_ = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : str=None , a : List[Any]=None , **a : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
if isinstance(a , a ):
raise ValueError(
F"Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE : List[str] = device if isinstance(a , a ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : str = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
SCREAMING_SNAKE_CASE : Any = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Any = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a , a ) and column:
if all(
isinstance(a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a , axis=0 )
return column
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a , (str, bytes, type(a )) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
SCREAMING_SNAKE_CASE : Dict = {"dtype": jnp.intaa}
else:
SCREAMING_SNAKE_CASE : str = {"dtype": jnp.intaa}
elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE : int = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , "__array__" ) and not isinstance(a , jax.Array ):
SCREAMING_SNAKE_CASE : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a , map_list=a )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
SCREAMING_SNAKE_CASE : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_arrow_extractor().extract_column(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : Tuple = self.recursive_tensorize(a )
SCREAMING_SNAKE_CASE : Optional[int] = self._consolidate(a )
return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_batch(a )
SCREAMING_SNAKE_CASE : str = self.python_features_decoder.decode_batch(a )
SCREAMING_SNAKE_CASE : List[Any] = self.recursive_tensorize(a )
for column_name in batch:
SCREAMING_SNAKE_CASE : List[Any] = self._consolidate(batch[column_name] )
        return batch
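# A hedged usage sketch: this formatter is what backs Dataset.with_format("jax")
# in 🤗 Datasets, so indexing a formatted dataset yields jax.Array values.
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
# batch = ds[:2]  # {"x": jax.Array of shape (2, 2)}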
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]:
__A : str = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class SCREAMING_SNAKE_CASE (a__ , a__ , a__ , unittest.TestCase ):
lowerCAmelCase = StableDiffusionLatentUpscalePipeline
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase = frozenset([] )
lowerCAmelCase = True
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = 1
__A : str = 4
__A : Dict = (16, 16)
__A : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(_UpperCAmelCase)
return image
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
torch.manual_seed(0)
__A : Dict = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=_UpperCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=_UpperCAmelCase , only_cross_attention=_UpperCAmelCase , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
__A : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
__A : List[Any] = EulerDiscreteScheduler(prediction_type='sample')
__A : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
__A : List[str] = CLIPTextModel(_UpperCAmelCase)
__A : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__A : List[str] = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=0):
'''simple docstring'''
if str(_UpperCAmelCase).startswith('mps'):
__A : Optional[Any] = torch.manual_seed(_UpperCAmelCase)
else:
__A : Tuple = torch.Generator(device=_UpperCAmelCase).manual_seed(_UpperCAmelCase)
__A : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = 'cpu'
__A : str = self.get_dummy_components()
__A : Optional[Any] = self.pipeline_class(**_UpperCAmelCase)
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : Optional[Any] = self.get_dummy_inputs(_UpperCAmelCase)
__A : List[str] = pipe(**_UpperCAmelCase).images
__A : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3))
__A : Any = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
__A : List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_UpperCAmelCase , 1e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
__A : int = self.get_dummy_components()
__A : Union[str, Any] = self.pipeline_class(**_UpperCAmelCase)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCAmelCase)
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : int = self.get_dummy_inputs(_UpperCAmelCase)
__A : Union[str, Any] = 2
__A : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
__A : List[str] = getattr(_UpperCAmelCase , scheduler_enum.name)
__A : Optional[Any] = scheduler_cls.from_config(pipe.scheduler.config)
__A : Any = pipe(**_UpperCAmelCase)[0]
outputs.append(_UpperCAmelCase)
assert check_same_shape(_UpperCAmelCase)
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = torch.manual_seed(33)
__A : List[str] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa)
pipe.to('cuda')
__A : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__A : Any = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
__A : Any = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , output_type='latent').images
__A : int = upscaler(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCAmelCase , output_type='np' , ).images[0]
__A : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
assert np.abs((expected_image - image).mean()) < 5e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = torch.manual_seed(33)
__A : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__A : Any = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
__A : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
__A : int = upscaler(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCAmelCase , output_type='np' , ).images[0]
__A : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
        assert np.abs((expected_image - image).max()) < 5e-2
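# The slow tests above demonstrate the intended two-stage flow: a base
# pipeline produces latents (output_type="latent") that the x2 latent
# upscaler refines. A hedged sketch (checkpoints downloaded at runtime,
# CUDA assumed):
# base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
# upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
# latents = base("a photo of an astronaut", output_type="latent").images
# image = upscaler(prompt="a photo of an astronaut", image=latents, num_inference_steps=20, guidance_scale=0).images[0]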
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , ) | 25 | 0 |
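# A minimal standalone sketch of the object-detection pipeline API that the tests
# above exercise. The checkpoint and image URL are taken from the tests; the
# threshold value is an illustrative assumption, not a required setting.
from transformers import pipeline
detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9985,  # keep only very confident boxes (here: the two cats)
)
for prediction in predictions:
    # each entry is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(prediction["label"], round(prediction["score"], 4), prediction["box"])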
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : str = "donut-swin"
A__ : Tuple = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , _snake_case : int=2_24 , _snake_case : Optional[Any]=4 , _snake_case : int=3 , _snake_case : Optional[Any]=96 , _snake_case : Tuple=[2, 2, 6, 2] , _snake_case : List[str]=[3, 6, 12, 24] , _snake_case : List[Any]=7 , _snake_case : Union[str, Any]=4.0 , _snake_case : List[Any]=True , _snake_case : Optional[int]=0.0 , _snake_case : Optional[Any]=0.0 , _snake_case : Any=0.1 , _snake_case : Tuple="gelu" , _snake_case : str=False , _snake_case : Union[str, Any]=0.02 , _snake_case : List[str]=1E-5 , **_snake_case : List[str] , ):
"""simple docstring"""
super().__init__(**_snake_case )
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = embed_dim
A__ = depths
A__ = len(_snake_case )
A__ = num_heads
A__ = window_size
A__ = mlp_ratio
A__ = qkv_bias
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = drop_path_rate
A__ = hidden_act
A__ = use_absolute_embeddings
A__ = layer_norm_eps
A__ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A__ = int(embed_dim * 2 ** (len(_snake_case ) - 1) )
| 9 |
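# Sketch of instantiating the configuration defined above; in the transformers
# library it is exposed as DonutSwinConfig, and the values below simply repeat
# the defaults from the __init__ signature. The point of interest is how
# hidden_size is derived from embed_dim and the number of stages so the encoder
# can plug into VisionEncoderDecoderModel.
from transformers import DonutSwinConfig
config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(config.hidden_size)  # 96 * 2 ** (4 - 1) == 768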
def lowerCamelCase__ ( number):
    # Automorphic-number check: True when the square of `number` ends in the
    # same digits as `number` itself (e.g. 76 * 76 == 5776).
    if not isinstance(number , int):
        SCREAMING_SNAKE_CASE : Tuple = f"Input value of [number={number}] must be an integer"
        raise TypeError(SCREAMING_SNAKE_CASE)
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 0 |
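# Quick sanity check for the automorphic-number checker defined above
# (lowerCamelCase__): 5, 6, 25 and 76 pass while 7 fails (7 * 7 == 49).
for candidate in (0, 1, 5, 6, 7, 25, 76, 376):
    print(candidate, lowerCamelCase__(candidate))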
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = 42
class lowerCAmelCase_ ( __lowercase, __lowercase ):
@register_to_config
def __init__( self : int , _A : int = 32 , _A : int = 64 , _A : int = 20 , _A : int = 768 , _A : Union[str, Any]=77 , _A : Optional[Any]=4 , _A : float = 0.0 , _A : str = "silu" , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[str] = "linear" , _A : Optional[str] = "prd" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[int] = None , ):
super().__init__()
_UpperCamelCase = num_attention_heads
_UpperCamelCase = attention_head_dim
_UpperCamelCase = num_attention_heads * attention_head_dim
_UpperCamelCase = additional_embeddings
_UpperCamelCase = time_embed_dim or inner_dim
_UpperCamelCase = embedding_proj_dim or embedding_dim
_UpperCamelCase = clip_embed_dim or embedding_dim
_UpperCamelCase = Timesteps(_A , _A , 0 )
_UpperCamelCase = TimestepEmbedding(_A , _A , out_dim=_A , act_fn=_A )
_UpperCamelCase = nn.Linear(_A , _A )
if embedding_proj_norm_type is None:
_UpperCamelCase = None
elif embedding_proj_norm_type == "layer":
_UpperCamelCase = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_UpperCamelCase = nn.Linear(_A , _A )
if encoder_hid_proj_type is None:
_UpperCamelCase = None
elif encoder_hid_proj_type == "linear":
_UpperCamelCase = nn.Linear(_A , _A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _A ) )
if added_emb_type == "prd":
_UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , _A ) )
elif added_emb_type is None:
_UpperCamelCase = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
_A , _A , _A , dropout=_A , activation_fn='''gelu''' , attention_bias=_A , )
for d in range(_A )
] )
if norm_in_type == "layer":
_UpperCamelCase = nn.LayerNorm(_A )
elif norm_in_type is None:
_UpperCamelCase = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_UpperCamelCase = nn.LayerNorm(_A )
_UpperCamelCase = nn.Linear(_A , _A )
_UpperCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
_UpperCamelCase = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , _A , persistent=_A )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , _A ) )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , _A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = {}
def fn_recursive_add_processors(_A : str , _A : torch.nn.Module , _A : Dict[str, AttentionProcessor] ):
if hasattr(_A , '''set_processor''' ):
_UpperCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , _A , _A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A , _A , _A )
return processors
def UpperCamelCase_ ( self : Optional[Any] , _A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
_UpperCamelCase = len(self.attn_processors.keys() )
if isinstance(_A , _A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A : str , _A : torch.nn.Module , _A : List[str] ):
if hasattr(_A , '''set_processor''' ):
if not isinstance(_A , _A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , _A , _A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A , _A , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : Union[torch.Tensor, float, int] , _A : torch.FloatTensor , _A : Optional[torch.FloatTensor] = None , _A : Optional[torch.BoolTensor] = None , _A : bool = True , ):
_UpperCamelCase = hidden_states.shape[0]
_UpperCamelCase = timestep
if not torch.is_tensor(_A ):
_UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCamelCase = timesteps * torch.ones(_A , dtype=timesteps.dtype , device=timesteps.device )
_UpperCamelCase = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
_UpperCamelCase = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_UpperCamelCase = self.embedding_proj_norm(_A )
_UpperCamelCase = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_UpperCamelCase = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
_UpperCamelCase = self.proj_in(_A )
_UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
_UpperCamelCase = []
_UpperCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_UpperCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_UpperCamelCase = hidden_states[:, None, :]
_UpperCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(_A , -1 , -1 )
additional_embeds.append(_A )
_UpperCamelCase = torch.cat(
_A , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_UpperCamelCase = F.pad(
_A , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_UpperCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
_UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
_UpperCamelCase = F.pad(_A , (0, self.additional_embeddings) , value=0.0 )
_UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_UpperCamelCase = self.norm_in(_A )
for block in self.transformer_blocks:
_UpperCamelCase = block(_A , attention_mask=_A )
_UpperCamelCase = self.norm_out(_A )
if self.prd_embedding is not None:
_UpperCamelCase = hidden_states[:, -1]
else:
_UpperCamelCase = hidden_states[:, additional_embeddings_len:]
_UpperCamelCase = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def UpperCamelCase_ ( self : List[Any] , _A : Dict ):
_UpperCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 10 |
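# Hedged instantiation sketch for the prior network above; diffusers exposes it
# as PriorTransformer. The sizes here are illustrative assumptions chosen to be
# tiny, not the dimensions of any released checkpoint.
from diffusers import PriorTransformer
model = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=4,
    num_layers=2,
    embedding_dim=8,
    num_embeddings=7,
    additional_embeddings=4,
)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy config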
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) ) | 25 | 0 |
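# Minimal inference sketch matching the integration test above, using only
# public transformers APIs; the expected hidden-state slice in the test comes
# from exactly this kind of forward pass on distilbert-base-uncased.
import torch
from transformers import DistilBertModel, DistilBertTokenizer
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    last_hidden_state = model(**inputs).last_hidden_state
print(last_hidden_state.shape)  # torch.Size([1, sequence_length, 768])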
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution (taken = 20):
    """simple docstring"""
    total = math.comb(NUM_BALLS , taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 11 |
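# Hedged cross-check of the closed form above: by linearity of expectation the
# answer is NUM_COLOURS * P(a given colour appears), with P(appears) equal to
# 1 - comb(60, 20) / comb(70, 20). The Monte Carlo estimate below is only an
# illustrative sketch (the trial count is an arbitrary assumption).
import random
def monte_carlo_estimate(trials: int = 100_000) -> float:
    urn = [colour for colour in range(7) for _ in range(10)]  # 70 balls, 10 per colour
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(urn, 20)))
    return seen / trials
print(monte_carlo_estimate())  # should land near 6.818741802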
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 0 |
def multiplication_table ( number , number_of_terms ) -> str:
'''simple docstring'''
return "\n".join(
F'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
| 12 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 25 | 0 |
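# Standalone sketch of the unconditional KarrasVe sampling loop covered by the
# tests above, wired from randomly initialised components so it runs without a
# checkpoint download; the generated pixels are meaningless, only the plumbing
# is of interest.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
unet = UNet2DModel(
    block_out_channels=(32, 64),
    layers_per_block=2,
    sample_size=32,
    in_channels=3,
    out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
images = pipe(num_inference_steps=2, generator=torch.manual_seed(0), output_type="numpy").images
print(images.shape)  # (1, 32, 32, 3)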
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Dict = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : str = 'owlvit_text_model'
def __init__( self , SCREAMING_SNAKE_CASE_=4_94_08 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_="quick_gelu" , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=4_94_06 , SCREAMING_SNAKE_CASE_=4_94_07 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : int = intermediate_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Dict = num_attention_heads
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : str = hidden_act
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : Tuple = attention_dropout
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Optional[Any] = initializer_factor
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : str = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'owlvit_vision_model'
def __init__( self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_="quick_gelu" , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = hidden_size
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : Tuple = num_attention_heads
__lowerCamelCase : str = num_channels
__lowerCamelCase : Dict = image_size
__lowerCamelCase : str = patch_size
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : List[str] = layer_norm_eps
__lowerCamelCase : Tuple = attention_dropout
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : Any = initializer_factor
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : Tuple = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = 'owlvit'
lowerCamelCase : Dict = True
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2.6_5_9_2 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if text_config is None:
__lowerCamelCase : Dict = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
__lowerCamelCase : Optional[Any] = {}
logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )
__lowerCamelCase : List[str] = OwlViTTextConfig(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = OwlViTVisionConfig(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = projection_dim
__lowerCamelCase : List[str] = logit_scale_init_value
__lowerCamelCase : List[Any] = return_dict
__lowerCamelCase : str = 1.0
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase : str = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Optional[int] = text_config
__lowerCamelCase : Dict = vision_config
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> str:
__lowerCamelCase : int = copy.deepcopy(self.__dict__ )
__lowerCamelCase : Optional[Any] = self.text_config.to_dict()
__lowerCamelCase : Any = self.vision_config.to_dict()
__lowerCamelCase : str = self.__class__.model_type
return output
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def lowercase_ ( self ) -> float:
return 1E-4
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = None , ) -> Mapping[str, Any]:
__lowerCamelCase : Optional[int] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = super().generate_dummy_inputs(
processor.image_processor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
return {**text_input_dict, **image_input_dict}
@property
def lowercase_ ( self ) -> int:
return 14
| 13 |
def binary_multiply ( a , b):
    # Doubling ("Russian peasant") multiplication: for every set bit of b,
    # accumulate the correspondingly doubled copy of a.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply ( a , b , c):
    # Same doubling scheme, but each accumulation into res is reduced modulo c.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res | 25 | 0 |
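# Usage sketch for the doubling-based multiplication helpers above: the plain
# version must agree with the * operator and the modular version with (a * b) % c.
for a, b, c in [(3, 7, 5), (12, 34, 7), (99, 101, 13)]:
    assert binary_multiply(a, b) == a * b
    assert binary_mod_multiply(a, b, c) == (a * b) % c
print("all checks passed")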
class Node :
    """simple docstring"""
    def __init__( self , data , previous=None , next_node=None ) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node
    def __str__( self ) -> str:
        return F"""{self.data}"""
    def get_data( self ) -> int:
        return self.data
    def get_next( self ):
        return self.next
    def get_previous( self ):
        return self.previous
class LinkedListIterator :
    """simple docstring"""
    def __init__( self , head ) -> None:
        self.current = head
    def __iter__( self ):
        return self
    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList :
    """simple docstring"""
    def __init__( self ) -> None:
        self.head = None # First node in list
        self.tail = None # Last node in list
    def __str__( self ) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__( self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
    def __iter__( self ):
        return LinkedListIterator(self.head )
    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node ) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )
    def set_tail( self , node ) -> None:
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )
    def insert( self , value ) -> None:
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node( self , node , node_to_insert ) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node( self , node , node_to_insert ) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position( self , position , value ) -> None:
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )
    def get_node( self , item ) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('''Node not found''' )
    def delete_value( self , value ):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )
    @staticmethod
    def remove_node_pointers( node ) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None
    def is_empty( self ):
        return self.head is None
def create_linked_list() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
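# Short usage sketch for the doubly linked list above: insert() appends,
# insert_at_position() counts positions from 1, and delete_value() unlinks the
# first node holding the value.
linked_list = LinkedList()
for value in (10, 30):
    linked_list.insert(value)
linked_list.insert_at_position(position=2, value=20)
print(str(linked_list))   # 10 20 30
linked_list.delete_value(20)
print(20 in linked_list)  # False
print(str(linked_list))   # 10 30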
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='roformer'
def __init__( self : Dict , a : Any=5_0000 , a : List[Any]=None , a : str=768 , a : str=12 , a : Tuple=12 , a : Optional[Any]=3072 , a : List[str]="gelu" , a : List[Any]=0.1 , a : Union[str, Any]=0.1 , a : Tuple=1536 , a : List[str]=2 , a : Tuple=0.02 , a : Any=1e-12 , a : Optional[int]=0 , a : Union[str, Any]=False , a : int=True , **a : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = rotary_value
SCREAMING_SNAKE_CASE : int = use_cache
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "sequence"}
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] ) | 25 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A : Union[str, Any] = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor ( DeformableDetrImageProcessor ):
    '''simple docstring'''
    def __init__(self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 15 |
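# What the shim above means in practice: constructing the old feature-extractor
# name still works but warns, while new code should build the image processor
# directly. Minimal sketch:
from transformers import DeformableDetrImageProcessor
image_processor = DeformableDetrImageProcessor()  # preferred, warning-free replacement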
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
a_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints ( path_to_checkpoints , dump_path):
SCREAMING_SNAKE_CASE : List[Any] = BertAbsConfig(
temp_dir="." , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    SCREAMING_SNAKE_CASE : Dict = torch.load(path_to_checkpoints , lambda storage , loc: storage)
SCREAMING_SNAKE_CASE : str = AbsSummarizer(_a , torch.device("cpu") , _a)
original.eval()
SCREAMING_SNAKE_CASE : List[str] = BertAbsSummarizer(_a , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained("bert-base-uncased")
# prepare the model inputs
    SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("This is sample éàalj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
    SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("This is sample 3 éàalj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[Any] = encoder_input_ids
SCREAMING_SNAKE_CASE : List[Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : Optional[int] = original(_a , _a , _a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Dict = original.generator(_a)
SCREAMING_SNAKE_CASE : Any = new_model(
_a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Tuple = new_model.generator(_a)
SCREAMING_SNAKE_CASE : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : int = torch.allclose(_a , _a , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
a_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
) | 25 | 0 |
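# Illustrative invocation of the conversion script above; the script file name
# and both paths are placeholders for the actual checkout and checkpoint layout.
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-converted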
def solution ( n : int = 1000 ):
    # Sum of every natural number below n that is a multiple of 3 or 5; each
    # qualifying number is counted exactly once.
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f'{solution() = }') | 16 |
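# Quick consistency check for solution() against a brute-force sum; both sides
# count every qualifying number below n exactly once.
for n in (10, 100, 1000):
    assert solution(n) == sum(x for x in range(n) if x % 3 == 0 or x % 5 == 0)
print(solution(1000))  # 233168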
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
args = parser.parse_args()
txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path) | 25 | 0 |
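# Illustrative invocation of the conversion above (script name and dump path are
# placeholders):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-v1-alpha-image-variations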
from __future__ import annotations
def ceil_index ( v , l , r , key ): # noqa: E741
    # Binary search for the smallest index in v[l+1:r+1] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m # noqa: E741
    return r
def longest_increasing_subsequence_length ( v : list[int] ) -> int:
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 ,len(v ) ):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest candidate subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens the ending of an existing candidate
            tail[ceil_index(v , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
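# Example run of the O(n log n) longest-increasing-subsequence length above; for
# this classic input the answer is 6, e.g. the subsequence [2, 3, 7, 8, 10, 13].
v = [2, 5, 3, 7, 11, 8, 10, 13, 6]
print(longest_increasing_subsequence_length(v))  # 6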
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
from math import pi, sqrt, tan
def surface_area_cube(side_length):
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length, breadth, height):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius):
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius):
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_a, radius_b, height):
    if radius_a < 0 or radius_b < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
    return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)
def surface_area_cylinder(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius, tube_radius):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length, width):
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length):
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side_a, side_b, side_c):
    if side_a < 0 or side_b < 0 or side_c < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_a + side_b < side_c or side_a + side_c < side_b or side_b + side_c < side_a:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_a + side_b + side_c) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_a)
        * (semi_perimeter - side_b)
        * (semi_perimeter - side_c))
    return area
def area_parallelogram(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base_a, base_b, height):
    if base_a < 0 or base_b < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_a + base_b) * height
def area_circle(radius):
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x, radius_y):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_a, diagonal_b):
    if diagonal_a < 0 or diagonal_b < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_a * diagonal_b
def area_reg_polygon(sides, length):
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
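# A minimal cross-check of the repaired formulas above (my addition; all names
# come from the demo calls below): Heron's formula must agree with the
# base-height formula on a 3-4-5 right triangle, and a 4-sided regular polygon
# must match the plain square formula.
assert abs(area_triangle_three_sides(3, 4, 5) - area_triangle(3, 4)) < 1e-9
assert abs(area_reg_polygon(4, 10) - area_square(10)) < 1e-9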
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''') | 25 | 0 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCAmelCase:
pass
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ (SchedulerCommonTest ):
snake_case =(PNDMScheduler,)
snake_case =(('num_inference_steps', 50),)
def __UpperCamelCase ( self , **lowercase_) -> Optional[int]:
a__ ={
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
        a__.update(**lowercase_)
        return a__
    def __UpperCamelCase ( self , time_step=0 , **lowercase_) -> List[Any]:
        kwargs =dict(self.forward_default_kwargs)
        num_inference_steps =kwargs.pop('num_inference_steps' , None)
a__ =self.dummy_sample
a__ =0.1 * sample
a__ =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a__ =self.get_scheduler_config(**lowercase_)
a__ =scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# copy over dummy past residuals
a__ =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_)
a__ =scheduler_class.from_pretrained(lowercase_)
new_scheduler.set_timesteps(lowercase_)
# copy over dummy past residuals
a__ =dummy_past_residuals[:]
a__ =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
a__ =new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a__ =scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
a__ =new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self) -> Tuple:
pass
    def __UpperCamelCase ( self , time_step=0 , **lowercase_) -> str:
        kwargs =dict(self.forward_default_kwargs)
        num_inference_steps =kwargs.pop('num_inference_steps' , None)
a__ =self.dummy_sample
a__ =0.1 * sample
a__ =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a__ =self.get_scheduler_config()
a__ =scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# copy over dummy past residuals (must be after setting timesteps)
a__ =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_)
a__ =scheduler_class.from_pretrained(lowercase_)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_)
# copy over dummy past residual (must be after setting timesteps)
a__ =dummy_past_residuals[:]
a__ =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
a__ =new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a__ =scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
a__ =new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self , **lowercase_) -> int:
a__ =self.scheduler_classes[0]
a__ =self.get_scheduler_config(**lowercase_)
a__ =scheduler_class(**lowercase_)
a__ =10
a__ =self.dummy_model()
a__ =self.dummy_sample_deter
scheduler.set_timesteps(lowercase_)
for i, t in enumerate(scheduler.prk_timesteps):
a__ =model(lowercase_ , lowercase_)
a__ =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a__ =model(lowercase_ , lowercase_)
a__ =scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample
return sample
def __UpperCamelCase ( self) -> Dict:
        kwargs =dict(self.forward_default_kwargs)
        num_inference_steps =kwargs.pop('num_inference_steps' , None)
for scheduler_class in self.scheduler_classes:
a__ =self.get_scheduler_config()
a__ =scheduler_class(**lowercase_)
a__ =self.dummy_sample
a__ =0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , 'set_timesteps'):
scheduler.set_timesteps(lowercase_)
elif num_inference_steps is not None and not hasattr(lowercase_ , 'set_timesteps'):
a__ =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a__ =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a__ =dummy_past_residuals[:]
a__ =scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample
a__ =scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a__ =scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample
a__ =scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __UpperCamelCase ( self) -> str:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def __UpperCamelCase ( self) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_)
a__ =self.scheduler_classes[0]
a__ =self.get_scheduler_config(steps_offset=1)
a__ =scheduler_class(**lowercase_)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __UpperCamelCase ( self) -> List[str]:
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_)
def __UpperCamelCase ( self) -> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_)
def __UpperCamelCase ( self) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_)
def __UpperCamelCase ( self) -> Dict:
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
a__ =27
for scheduler_class in self.scheduler_classes:
a__ =self.dummy_sample
a__ =0.1 * sample
a__ =self.get_scheduler_config()
a__ =scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a__ =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample
def __UpperCamelCase ( self) -> Dict:
        with self.assertRaises(ValueError):
a__ =self.scheduler_classes[0]
a__ =self.get_scheduler_config()
a__ =scheduler_class(**lowercase_)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __UpperCamelCase ( self) -> int:
a__ =self.full_loop()
a__ =torch.sum(torch.abs(lowercase_))
a__ =torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_98.13_18) < 1e-2
assert abs(result_mean.item() - 0.25_80) < 1e-3
def __UpperCamelCase ( self) -> Any:
a__ =self.full_loop(prediction_type='v_prediction')
a__ =torch.sum(torch.abs(lowercase_))
a__ =torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 67.39_86) < 1e-2
assert abs(result_mean.item() - 0.08_78) < 1e-3
def __UpperCamelCase ( self) -> Tuple:
# We specify different beta, so that the first alpha is 0.99
        a__ =self.full_loop(set_alpha_to_one=True , beta_start=0.01)
a__ =torch.sum(torch.abs(lowercase_))
a__ =torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 2_30.03_99) < 1e-2
assert abs(result_mean.item() - 0.29_95) < 1e-3
def __UpperCamelCase ( self) -> List[str]:
# We specify different beta, so that the first alpha is 0.99
        a__ =self.full_loop(set_alpha_to_one=False , beta_start=0.01)
a__ =torch.sum(torch.abs(lowercase_))
a__ =torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_86.94_82) < 1e-2
assert abs(result_mean.item() - 0.24_34) < 1e-3
| 20 |
from __future__ import annotations
def lowerCamelCase__ ( _a):
    i = 2
    factors = []
    while i * i <= _a:
        if _a % i:
            i += 1
        else:
            _a //= i
            factors.append(i)
    if _a > 1:
        factors.append(_a)
    return factors
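# Hedged usage sketch for the factorisation helper above (the obfuscated
# `lowerCamelCase__` name is kept from the source): multiplying the returned
# prime factors back together must reconstruct the input.
from functools import reduce
from operator import mul
assert lowerCamelCase__(360) == [2, 2, 2, 3, 3, 5]
assert reduce(mul, lowerCamelCase__(360)) == 360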
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ : List[Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : str = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! ๐ค", "emoji": True}}
UpperCAmelCase_ : Tuple = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": F"""๐ค Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"emoji": True,
},
}
]
UpperCAmelCase_ : Optional[Any] = 0
for log in Path().glob("*.log"):
UpperCAmelCase_ : Tuple = 0
with open(log, "r") as f:
for line in f:
UpperCAmelCase_ : Optional[Any] = json.loads(line)
if line.get("nodeid", "") != "":
UpperCAmelCase_ : List[Any] = line["nodeid"]
if line.get("duration", None) is not None:
UpperCAmelCase_ : Any = F"""{line["duration"]:.4f}"""
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ : Optional[Any] = []
log.unlink()
UpperCAmelCase_ : List[str] = ""
UpperCAmelCase_ : Any = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = {}
for test in failed_tests:
UpperCAmelCase_ : int = test[0].split("::")
UpperCAmelCase_ : str = data[0].split("/")[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ : Tuple = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ : List[str] = [test[0] for test in failed_table]
UpperCAmelCase_ : int = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ : int = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ : List[Any] = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
UpperCAmelCase_ : str = "Too many failed tests, please see the full report in the Action results."
UpperCAmelCase_ : Dict = len(err) + 10
UpperCAmelCase_ : List[Any] = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCAmelCase_ : str = "No failed tests! ๐ค"
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
UpperCAmelCase_ : str = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! ๐ค":
UpperCAmelCase_ : Tuple = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
UpperCAmelCase_ : Any = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ : Optional[int] = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ : Tuple = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
UpperCAmelCase_ : List[Any] = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ : List[str] = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ : str = row[0]
else:
UpperCAmelCase_ : List[Any] = ""
UpperCAmelCase_ : Dict = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 21 |
from math import factorial, pi
def maclaurin_sin(theta , accuracy = 30):
    if not isinstance(theta , (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy , int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta , accuracy = 30):
    if not isinstance(theta , (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy , int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
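# Hedged accuracy check (my addition): with the default 30 terms and the range
# reduction above, the truncated series should match math.sin / math.cos to
# far better than 1e-9 for moderate arguments.
import math
for x in (0.0, 1.0, -2.5, 10.0):
    assert abs(maclaurin_sin(x) - math.sin(x)) < 1e-9
    assert abs(maclaurin_cos(x) - math.cos(x)) < 1e-9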
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15)) | 25 | 0 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    '''simple docstring'''
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    '''simple docstring'''
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int):
    '''simple docstring'''
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int):
    '''simple docstring'''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
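# Hedged parity check (my addition; `binary_search_std_lib` and `data` are
# names I chose for illustration): the hand-rolled helpers should agree with
# the standard-library `bisect` module on a small sorted list.
data = [0, 5, 7, 10, 15]
assert bisect_left(data, 7) == bisect.bisect_left(data, 7)
assert bisect_right(data, 7) == bisect.bisect_right(data, 7)
assert binary_search(data, 10) == 3 == binary_search_by_recursion(data, 10, 0, len(data) - 1)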
if __name__ == "__main__":
_snake_case : Union[str, Any] = input('Enter numbers separated by comma:\n').strip()
_snake_case : Union[str, Any] = sorted(int(item) for item in user_input.split(','))
_snake_case : Union[str, Any] = int(input('Enter a single number to be found in the list:\n'))
_snake_case : Union[str, Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 22 |
from __future__ import annotations
import math
class SegmentTree :
    '''simple docstring'''
    def __init__( self , size : int ) -> None:
        """simple docstring"""
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update
    def left( self , idx : int ) -> int:
        """simple docstring"""
        return idx * 2
    def right( self , idx : int ) -> int:
        """simple docstring"""
        return idx * 2 + 1
    def build( self , idx : int , left_element : int , right_element : int , a : list[int] ) -> None:
        """simple docstring"""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx : int , left_element : int , right_element : int , a : int , b : int , val : int ) -> bool:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx : int , left_element : int , right_element : int , a : int , b : int ) -> int | float:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        left_query = self.query(self.left(idx ) , left_element , mid , a , b )
        right_query = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(left_query , right_query )
    def __str__( self ) -> str:
        """simple docstring"""
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
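    # Hedged brute-force cross-check (my addition): before any range updates,
    # a range-max query over 1-based positions 4..6 must equal max() over the
    # corresponding slice A[3:6].
    assert segt.query(1, 1, size, 4, 6) == max(A[3:6])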
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt) | 25 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq , size):
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it , size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty):
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key):
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext , key):
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2):
        rowa, cola = divmod(table.index(chara) , 5)
        rowb, colb = divmod(table.index(charb) , 5)
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode(ciphertext , key):
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2):
        rowa, cola = divmod(table.index(chara) , 5)
        rowb, colb = divmod(table.index(charb) , 5)
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
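# Hedged roundtrip sketch (my addition; `encode`/`decode` are the names implied
# by the function bodies above): decoding an encoded message yields the
# prepared plaintext (upper-cased, X-padded), not the raw input.
message = "Hide the gold in the tree stump"
assert decode(encode(message, "playfair example"), "playfair example") == prepare_input(message)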
| 23 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def __UpperCamelCase ( self : Optional[int] ) -> int:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_euler" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_euler" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def __UpperCamelCase ( self : Tuple ) -> List[Any]:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_dpmpp_2m" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 25 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase ( PretrainedConfig):
__lowercase : int = '''cvt'''
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ) -> int:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 24 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 | 25 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger()
@dataclass
class _A :
lowercase__: nn.Module
lowercase__: List[nn.Module] = field(default_factory=__lowercase )
lowercase__: list = field(default_factory=__lowercase )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Tensor , __magic_name__ : Tensor ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = len(list(m.modules() ) ) == 1 or isinstance(__magic_name__ , nn.Convad ) or isinstance(__magic_name__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__magic_name__ )
def __call__( self : Optional[int] , __magic_name__ : Tensor ) -> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__magic_name__ )
[x.remove() for x in self.handles]
return self
@property
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
return list(filter(lambda __magic_name__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _A :
lowercase__: nn.Module
lowercase__: nn.Module
lowercase__: int = 1
lowercase__: List = field(default_factory=__lowercase )
lowercase__: List = field(default_factory=__lowercase )
lowercase__: bool = True
def __call__( self : Dict , __magic_name__ : Tensor ) -> List[str]:
"""simple docstring"""
__snake_case : Any = Tracker(self.dest )(__magic_name__ ).parametrized
__snake_case : Dict = Tracker(self.src )(__magic_name__ ).parametrized
__snake_case : List[str] = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.src_skip , __magic_name__ ) )
__snake_case : List[str] = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.dest_skip , __magic_name__ ) )
if len(__magic_name__ ) != len(__magic_name__ ) and self.raise_if_mismatch:
raise Exception(
f'''Numbers of operations are different. Source module has {len(__magic_name__ )} operations while'''
f''' destination module has {len(__magic_name__ )}.''' )
for dest_m, src_m in zip(__magic_name__ , __magic_name__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
class _A ( nn.Module ):
def __init__( self : Dict , __magic_name__ : nn.Module ) -> Any:
"""simple docstring"""
super().__init__()
__snake_case : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("""conv1""", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("""block""" ), f'''Unexpected layer name {k}'''
__snake_case : Optional[int] = len(__magic_name__ ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
__snake_case : str = nn.ModuleDict(__magic_name__ )
def lowercase__ ( self : int , __magic_name__ : Tensor ) -> Tuple:
"""simple docstring"""
return get_trunk_forward_outputs(
__magic_name__ , out_feat_keys=__magic_name__ , feature_blocks=self._feature_blocks , )
class _A ( __lowercase ):
def lowercase__ ( self : Any , __magic_name__ : str ) -> str:
"""simple docstring"""
__snake_case : List[Any] = x.split("""-""" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : int , __magic_name__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
if x not in self:
__snake_case : Tuple = self.convert_name_to_timm(__magic_name__ )
__snake_case : List[str] = partial(lambda: (timm.create_model(__magic_name__ , pretrained=__magic_name__ ).eval(), None) )
else:
__snake_case : Tuple = super().__getitem__(__magic_name__ )
return val
class _A ( __lowercase ):
def __getitem__( self : Optional[Any] , __magic_name__ : str ) -> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
__snake_case : Any = RegNetModel
else:
__snake_case : Tuple = RegNetForImageClassification
return val
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
for from_key, to_key in keys:
__snake_case : int = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) -> Union[str, Any]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
__snake_case , __snake_case : str = from_model_func()
__snake_case : Optional[Any] = our_model_func(_lowerCamelCase ).eval()
__snake_case : Dict = ModuleTransfer(src=_lowerCamelCase , dest=_lowerCamelCase , raise_if_mismatch=_lowerCamelCase )
__snake_case : Union[str, Any] = torch.randn((1, 3, 224, 224) )
module_transfer(_lowerCamelCase )
if from_state_dict is not None:
__snake_case : Optional[Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__snake_case : Any = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
__snake_case : List[str] = manually_copy_vissl_head(_lowerCamelCase , our_model.state_dict() , _lowerCamelCase )
our_model.load_state_dict(_lowerCamelCase )
__snake_case : str = our_model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
__snake_case : Any = (
our_outputs.logits if isinstance(_lowerCamelCase , _lowerCamelCase ) else our_outputs.last_hidden_state
)
__snake_case : List[str] = from_model(_lowerCamelCase )
__snake_case : List[Any] = from_output[-1] if type(_lowerCamelCase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__snake_case : Union[str, Any] = our_outputs.hidden_states[-1]
assert torch.allclose(_lowerCamelCase , _lowerCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=_lowerCamelCase , )
__snake_case : Optional[int] = 224 if """seer""" not in name else 384
# we can use the convnext one
__snake_case : List[Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=_lowerCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=_lowerCamelCase , )
print(F'''Pushed {name}''' )
def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = True ) -> List[str]:
"""simple docstring"""
__snake_case : int = """imagenet-1k-id2label.json"""
__snake_case : int = 1000
__snake_case : Any = (1, num_labels)
__snake_case : Union[str, Any] = """huggingface/label-files"""
__snake_case : List[str] = num_labels
__snake_case : int = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
__snake_case : Optional[int] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case : Optional[Any] = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
__snake_case : int = partial(_lowerCamelCase , num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase )
__snake_case : Optional[Any] = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
__snake_case : List[str] = NameToOurModelFuncMap()
__snake_case : List[str] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_lowerCamelCase , _lowerCamelCase ) -> Tuple[nn.Module, Dict]:
__snake_case : List[str] = torch.hub.load_state_dict_from_url(_lowerCamelCase , model_dir=str(_lowerCamelCase ) , map_location="""cpu""" )
__snake_case : Optional[Any] = model_func()
# check if we have a head, if yes add it
__snake_case : str = files["""classy_state_dict"""]["""base_model"""]["""model"""]
__snake_case : Any = model_state_dict["""trunk"""]
model.load_state_dict(_lowerCamelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
__snake_case : List[str] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : List[str] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : Tuple = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : str = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__snake_case : Dict = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : List[Any] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : Union[str, Any] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : str = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _lowerCamelCase , _lowerCamelCase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 26 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k , v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj , fun , *args):
    try:
        return fun(obj , *args), None
    except Exception as e:
        return None, e
a_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
a_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
a_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
a_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my , fun , *args)
        py_res, py_exc = _run_operation(py , fun , *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names | 25 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A : Optional[int] = logging.get_logger(__name__)
__A : Any = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class lowerCamelCase( PretrainedConfig ):
'''simple docstring'''
__magic_name__ = 'imagegpt'
__magic_name__ = ['past_key_values']
__magic_name__ = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
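# Editorial usage sketch (values are illustrative, not from this file):
#   config = ImageGPTConfig(n_embd=512, n_layer=24, n_head=8)
#   onnx_config = ImageGPTOnnxConfig(config)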
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
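# Editorial note: with the structure registered above, importing a symbol such as
# VanModel from this package triggers the lazy import of `modeling_van` only on first
# access (module-level __getattr__, PEP 562); nothing heavy is loaded at import time.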
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field; used as the default key when none is passed
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
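# Editorial note: buckets are indexed by the integer offset from the minimum, so the
# sort also works for floats as long as the value range is modest, e.g.
#   assert bucket_sort([0.4, 1.2, 0.3]) == [0.3, 0.4, 1.2]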
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
A_ = """\
Text data.
Second line of data."""
A_ = """file"""
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
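# Editorial usage sketch (assumes a `datasets.Dataset` instance `ds`; the formatter is
# normally selected through the public API rather than instantiated directly):
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))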
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # 1 / (1 + e^-x), applied elementwise
    return 1 / (1 + np.exp(-vector))
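# Editorial sanity check:
# >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
# array([0.26894142, 0.73105858, 0.88079708])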
if __name__ == "__main__":
import doctest
    doctest.testmod()
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] , )
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale (ITU-R 601-2 luma transform)."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image to a binary image."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image into the padded array (the slice indices are an editorial
    # reconstruction; the left-hand side of this assignment was lost upstream)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
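# Editorial check: dilating a single white pixel with the 3x3 cross used below also
# turns on its 4-neighbourhood, since any overlap between kernel and image sets the
# output pixel (`summation > 0`).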
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
def is_automorphic_number(number: int) -> bool:
    # An automorphic number is one whose square ends in the number itself,
    # e.g. 5 -> 25, 76 -> 5776.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
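# Editorial quick checks:
# assert is_automorphic_number(76)      # 76**2 == 5776 ends in 76
# assert not is_automorphic_number(7)   # 7**2 == 49 does not end in 7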
if __name__ == "__main__":
import doctest
    doctest.testmod()
import argparse
import os
import re
lowerCamelCase__ : Optional[int] = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase__ : List[Any] = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
lowerCamelCase__ : Dict = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )
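# Editorial usage sketch: run `python utils/sort_auto_mappings.py` to sort the mappings
# in place, or add `--check_only` to merely report unsorted files (the script location
# is an assumption based on the transformers repo layout).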
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution=None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
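# Editorial note: each pass peels one increasing "strand" off the input and merges it
# into the solution, so a reverse-ordered input degrades to O(n^2) comparisons.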
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A :
'''simple docstring'''
@staticmethod
def snake_case_ ( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[Any] = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" )
snake_case : str = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Optional[int] = vqa_pipeline(SCREAMING_SNAKE_CASE_ ,top_k=1 )
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,[
[{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """answer""": ANY(SCREAMING_SNAKE_CASE_ )}],
[{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """answer""": ANY(SCREAMING_SNAKE_CASE_ )}],
] ,)
@require_torch
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" )
snake_case : str = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case : Any = """How many cats are there?"""
snake_case : Any = vqa_pipeline(image=SCREAMING_SNAKE_CASE_ ,question="""How many cats are there?""" ,top_k=2 )
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,[{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """answer""": ANY(SCREAMING_SNAKE_CASE_ )}, {"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """answer""": ANY(SCREAMING_SNAKE_CASE_ )}] )
snake_case : Dict = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,[{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """answer""": ANY(SCREAMING_SNAKE_CASE_ )}, {"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """answer""": ANY(SCREAMING_SNAKE_CASE_ )}] )
@slow
@require_torch
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = pipeline("""visual-question-answering""" ,model="""dandelin/vilt-b32-finetuned-vqa""" )
snake_case : Union[str, Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case : Union[str, Any] = """How many cats are there?"""
snake_case : str = vqa_pipeline(image=SCREAMING_SNAKE_CASE_ ,question=SCREAMING_SNAKE_CASE_ ,top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
snake_case : Union[str, Any] = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
snake_case : List[str] = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 ,)
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
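# Hedged usage sketch (not part of the original tests): how the
# visual-question-answering pipeline is called in practice. The image path is
# a placeholder, and running this downloads the checkpoint used in the slow
# test above.
def _vqa_usage_demo():
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    answers = vqa(image="path/to/local/image.png", question="How many cats are there?", top_k=2)
    # `answers` is a list of {"score": float, "answer": str} dicts, best first.
    return answers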
| 36 |
def lowerCamelCase__ (a, b):
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
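_binary_multiply = lowerCamelCase__  # capture before the name is reused below
# Hedged property check (added for illustration): the loop above is "Russian
# peasant" multiplication -- add `a` whenever the low bit of `b` is set, then
# double `a` and halve `b`.
def _binary_multiply_demo():
    for a in range(12):
        for b in range(12):
            assert _binary_multiply(a, b) == a * b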
def lowerCamelCase__ (a, b, c):
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 25 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCamelCase : int = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
super().__init__()
a__ : List[Any] = nn.Parameter(torch.randn(1 ) )
a__ : Optional[Any] = nn.Parameter(torch.randn(1 ) )
def _UpperCamelCase( self : int , lowerCamelCase__ : List[Any] ):
return x * self.a + self.b
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : Optional[int] = DummyModel()
a__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__, a__ : int = dummy_dataloaders()
a__ : List[Any] = ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase__ , automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
a__ : int = Accelerator(project_config=lowerCamelCase__ )
a__, a__, a__, a__ : str = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _UpperCamelCase( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : Union[str, Any] = DummyModel()
a__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__, a__ : Any = dummy_dataloaders()
# Train baseline
a__ : Optional[Any] = Accelerator()
a__, a__, a__, a__ : str = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
a__ : Optional[int] = os.path.join(lowerCamelCase__ , "initial" )
accelerator.save_state(lowerCamelCase__ )
((a__), (a__)) : Any = model.a.item(), model.b.item()
a__ : Optional[int] = optimizer.state_dict()
a__ : Any = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((a__), (a__)) : Dict = model.a.item(), model.b.item()
a__ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ : List[Any] = DummyModel()
a__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__, a__ : str = dummy_dataloaders()
a__ : Any = Accelerator()
a__, a__, a__, a__ : List[str] = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(lowerCamelCase__ )
((a__), (a__)) : Any = model.a.item(), model.b.item()
a__ : Dict = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[str] = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
a__ : Tuple = os.path.join(lowerCamelCase__ , "checkpoint" )
accelerator.save_state(lowerCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCamelCase__ )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((a__), (a__)) : Any = model.a.item(), model.b.item()
a__ : Optional[Any] = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : int = DummyModel()
a__ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__, a__ : List[str] = dummy_dataloaders()
a__ : int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
a__ : int = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__, a__, a__, a__ : List[str] = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
((a__), (a__)) : Optional[Any] = model.a.item(), model.b.item()
a__ : List[Any] = optimizer.state_dict()
a__ : Union[str, Any] = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((a__), (a__)) : str = model.a.item(), model.b.item()
a__ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ : str = DummyModel()
a__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__, a__ : Optional[int] = dummy_dataloaders()
a__ : str = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ )
a__ : str = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__, a__, a__, a__ : str = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) )
((a__), (a__)) : int = model.a.item(), model.b.item()
a__ : List[str] = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[str] = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((a__), (a__)) : Any = model.a.item(), model.b.item()
a__ : Dict = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : int = torch.tensor([1, 2, 3] )
a__ : List[Any] = torch.tensor([2, 3, 4] )
a__ : Tuple = DummyModel()
a__ : List[str] = torch.optim.Adam(net.parameters() )
a__ : Optional[Any] = Accelerator()
with self.assertRaises(lowerCamelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
a__ : Dict = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def _UpperCamelCase( self : str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : List[Any] = DummyModel()
a__ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__ : List[Any] = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.99 )
a__, a__ : Optional[int] = dummy_dataloaders()
a__ : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
a__ : int = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__, a__, a__, a__, a__ : List[Any] = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
a__ : Optional[int] = scheduler.state_dict()
train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
def _UpperCamelCase( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : int = DummyModel()
a__ : int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
# Train baseline
a__ : Tuple = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__ : Optional[Any] = accelerator.prepare(lowerCamelCase__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def _UpperCamelCase( self : Tuple ):
a__ : Union[str, Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCamelCase : Optional[Any] = """/tmp/accelerate/state_checkpointing"""
UpperCamelCase : Optional[int] = DummyModel()
UpperCamelCase : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
UpperCamelCase : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCamelCase , UpperCamelCase : Tuple = dummy_dataloaders()
UpperCamelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase : Union[str, Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
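# Hedged sketch (illustration only, not part of the original script): the
# minimal save_state()/load_state() round trip exercised by the tests above.
def _checkpoint_roundtrip_demo():
    with tempfile.TemporaryDirectory() as ckpt_dir:
        demo_accelerator = Accelerator()
        demo_model = DummyModel()
        demo_optimizer = torch.optim.Adam(demo_model.parameters(), lr=1e-3)
        demo_model, demo_optimizer = demo_accelerator.prepare(demo_model, demo_optimizer)
        demo_accelerator.save_state(ckpt_dir)  # writes model/optimizer/RNG state
        demo_accelerator.load_state(ckpt_dir)  # restores that exact state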
| 37 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
    model_type = 'roformer'
    def __init__( self , vocab_size=5_0000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
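# Hedged example (added for illustration): the class above corresponds to
# transformers' RoFormerConfig; assuming that upstream API, typical usage is:
def _roformer_config_demo():
    from transformers import RoFormerConfig

    config = RoFormerConfig(vocab_size=50000, rotary_value=True)
    assert config.model_type == "roformer"
    return config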
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "sequence"}
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
            ] )
| 25 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
A_ : str = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']
    preset_shape = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ):
if speaker_embeddings_dict_path is not None:
snake_case__ : Dict = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop("""subfolder""" , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , __SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
snake_case__ : str = None
else:
with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
snake_case__ : Optional[int] = json.load(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = None
snake_case__ : List[str] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , """v2""" ) , exist_ok=__SCREAMING_SNAKE_CASE )
snake_case__ : str = {}
snake_case__ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
snake_case__ : Dict = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , __SCREAMING_SNAKE_CASE , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[Any] = os.path.join(__SCREAMING_SNAKE_CASE , f"{prompt_key}_{key}.npy" )
snake_case__ : Any = tmp_dict
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , """w""" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = self.speaker_embeddings[voice_preset]
snake_case__ : str = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
snake_case__ : Tuple = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , __SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
snake_case__ : Dict = np.load(__SCREAMING_SNAKE_CASE )
return voice_preset_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
snake_case__ : List[Any] = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
else:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith(""".npz""" ):
snake_case__ : Optional[Any] = voice_preset + """.npz"""
snake_case__ : Any = np.load(__SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = self.tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
snake_case__ : Any = voice_preset
return encoded_text
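# Hedged usage sketch (not from the original file): the class above mirrors
# transformers' BarkProcessor. Assuming that upstream API, a typical call
# tokenizes the text and attaches the named speaker embeddings:
def _bark_processor_demo():
    from transformers import BarkProcessor

    processor = BarkProcessor.from_pretrained("suno/bark-small")
    inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    return inputs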
| 38 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(_a , pytorch_dump_folder_path):
SCREAMING_SNAKE_CASE : List[Any] = BertAbsConfig(
temp_dir="." , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE : Dict = torch.load(_a , lambda _a , _a: storage)
SCREAMING_SNAKE_CASE : str = AbsSummarizer(_a , torch.device("cpu") , _a)
original.eval()
SCREAMING_SNAKE_CASE : List[str] = BertAbsSummarizer(_a , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained("bert-base-uncased")
# prepare the model inputs
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("This is sample รฉร alj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("This is sample 3 รฉร alj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[Any] = encoder_input_ids
SCREAMING_SNAKE_CASE : List[Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : Optional[int] = original(_a , _a , _a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Dict = original.generator(_a)
SCREAMING_SNAKE_CASE : Any = new_model(
_a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Tuple = new_model.generator(_a)
SCREAMING_SNAKE_CASE : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : int = torch.allclose(_a , _a , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
    )
| 25 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __SCREAMING_SNAKE_CASE ():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __SCREAMING_SNAKE_CASE ():
snake_case_ = '''mock-s3-bucket'''
snake_case_ = F'''s3://{mock_bucket}'''
snake_case_ = extract_path_from_uri(SCREAMING_SNAKE_CASE__ )
assert dataset_path.startswith('''s3://''' ) is False
snake_case_ = '''./local/path'''
snake_case_ = extract_path_from_uri(SCREAMING_SNAKE_CASE__ )
assert dataset_path == new_dataset_path
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = is_remote_filesystem(SCREAMING_SNAKE_CASE__ )
assert is_remote is True
snake_case_ = fsspec.filesystem('''file''' )
snake_case_ = is_remote_filesystem(SCREAMING_SNAKE_CASE__ )
assert is_remote is False
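# Hedged demo (added for illustration): reading a gzip-compressed file through
# the compression filesystem that `datasets` registers on import, mirroring
# the parametrized test below. The file is created on the fly.
def _gzip_fs_demo(tmp_path):
    import gzip

    src = tmp_path / "data.txt.gz"
    with gzip.open(src, "wt", encoding="utf-8") as f:
        f.write("hello fsspec")
    fs = fsspec.filesystem("gzip", fo=str(src))
    [inner_name] = fs.glob("*")
    with fs.open(inner_name, "r", encoding="utf-8") as f:
        assert f.read() == "hello fsspec"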
@pytest.mark.parametrize('''compression_fs_class''' , COMPRESSION_FILESYSTEMS )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case_ = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case_ = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE__ )
snake_case_ = fsspec.filesystem(compression_fs_class.protocol , fo=SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = os.path.basename(SCREAMING_SNAKE_CASE__ )
snake_case_ = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(SCREAMING_SNAKE_CASE__ , '''r''' , encoding='''utf-8''' ) as f, open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case_ = compressed_file_paths[protocol]
snake_case_ = '''dataset.jsonl'''
snake_case_ = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
snake_case_, *snake_case_ = fsspec.get_fs_token_paths(SCREAMING_SNAKE_CASE__ )
assert fs.isfile(SCREAMING_SNAKE_CASE__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = hf_api.dataset_info(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
snake_case_ = HfFileSystem(repo_info=SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(SCREAMING_SNAKE_CASE__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def __SCREAMING_SNAKE_CASE ():
snake_case_ = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , clobber=SCREAMING_SNAKE_CASE__ )
with pytest.warns(SCREAMING_SNAKE_CASE__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(SCREAMING_SNAKE_CASE__ ) == 1
assert (
str(warning_info[0].message )
== F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
    )
| 39 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
    imgaimg.save_pretrained(args.dump_path)
| 25 | 0 |
def longest_distance( graph ) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
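# Hedged worked example (added for illustration): on the chain 0 -> 1 -> 2 -> 3
# the relaxation above assigns distances 1, 2, 3, 4, so this prints 4.
longest_distance({0: [1], 1: [2], 2: [3], 3: []})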
| 40 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """simple docstring"""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time: finish time minus arrival and burst time
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    """simple docstring"""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print('''Average turn around time =''' , total_turn_around_time / no_of_processes)
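# Hedged non-interactive check (added for illustration): process 1 arrives at
# t=0 with burst 5, process 2 at t=1 with burst 3. SRTF preempts process 1 at
# t=1, finishes process 2 at t=4 and process 1 at t=8, so the waiting times
# are [3, 0] and the turn-around times [8, 3].
def _srtf_demo():
    waiting = calculate_waitingtime([0, 1], [5, 3], 2)
    assert waiting == [3, 0]
    assert calculate_turnaroundtime([5, 3], 2, waiting) == [8, 3]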
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 41 |
from math import pi, sqrt, tan
def surface_area_cube(side_length):
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length, breadth, height):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius):
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius):
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1, radius_2, height):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius, tube_radius):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length, width):
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length):
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side1, side2, side3):
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area
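# Quick sanity check (added for illustration): a 3-4-5 right triangle has
# semi-perimeter 6, so Heron's formula yields sqrt(6 * 3 * 2 * 1) = 6.
assert area_triangle_three_sides(3, 4, 5) == 6.0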
def area_parallelogram(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base1, base2, height):
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius):
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x, radius_y):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1, diagonal_2):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides, length):
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 25 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase_ = 'lower'
lowerCamelCase_ = ['low', 'er</w>']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokens + ['<unk>']
lowerCamelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=15 ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Simple input
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ = ('This is a simple input', 'This is a pair')
lowerCamelCase_ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
pass
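# Hedged toy re-implementation (added for illustration) of the greedy BPE
# merging the fixture above encodes: with merges "l o" -> "lo", "lo w" -> "low"
# and "e r</w>" -> "er</w>", the word "lower" tokenizes to ["low", "er</w>"].
def _toy_bpe_demo():
    word = ["l", "o", "w", "e", "r</w>"]
    for a, b in [("l", "o"), ("lo", "w"), ("e", "r</w>")]:
        merged, i = [], 0
        while i < len(word):
            if i + 1 < len(word) and (word[i], word[i + 1]) == (a, b):
                merged.append(a + b)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    assert word == ["low", "er</w>"]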
| 42 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on MRPC with `accelerate`-managed gradient accumulation."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
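

# A minimal sketch of what `accelerator.accumulate(model)` buys you, written as a
# plain PyTorch loop (toy model and data; `_accum_steps` mirrors the
# `gradient_accumulation_steps` argument above).
import torch
from torch import nn

_toy_model = nn.Linear(4, 1)
_toy_opt = torch.optim.SGD(_toy_model.parameters(), lr=0.1)
_accum_steps = 4
for _step in range(8):
    _x, _y = torch.randn(2, 4), torch.randn(2, 1)
    _loss = nn.functional.mse_loss(_toy_model(_x), _y) / _accum_steps  # average over the window
    _loss.backward()  # gradients accumulate in .grad across iterations
    if (_step + 1) % _accum_steps == 0:
        _toy_opt.step()  # one optimizer step per accumulation window
        _toy_opt.zero_grad()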
def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 43 |
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of ``n`` as a sorted list with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
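

# Quick check of the function above: factors come back in order with multiplicity,
# so their product reconstructs the input.
from math import prod

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prod(prime_factors(360)) == 360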
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
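

# Why `depth_divisible_by` exists: scaled channel counts get rounded to
# hardware-friendly multiples. A sketch of the usual make_divisible rule
# (illustrative only; the exact helper lives in the modeling file, not here).
def _make_divisible(value: float, divisor: int = 8) -> int:
    new_value = max(divisor, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value


assert _make_divisible(32 * 0.35) == 16  # 11.2 -> 8 -> bumped to 16 by the 10% rule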
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 44 |
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first ``accuracy`` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # reduce theta into [0, 2*pi) so the series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first ``accuracy`` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
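

# Sanity check against the standard library: with the default 30 terms and the
# range reduction above, both series match math.sin/math.cos to high precision.
from math import cos, sin

assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-10
assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-10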
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15)) | 25 | 0 |
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
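

# Stooge sort is correct but deliberately inefficient — about O(n^2.71) — so it is
# only ever run on small inputs like this:
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]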
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted)) | 45 |
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range assignment."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign ``val`` to every position in [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
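

# Minimal usage sketch for the class above: one range assignment followed by
# range-max queries (indices are 1-based and inclusive throughout).
_demo = SegmentTree(5)
_demo.build(1, 1, 5, [5, 1, 4, 2, 3])
_demo.update(1, 1, 5, 2, 4, 9)  # assign 9 to positions 2..4
assert _demo.query(1, 1, 5, 1, 5) == 9
assert _demo.query(1, 1, 5, 1, 1) == 5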
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 25 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Map timm/DINO parameter names to their HF ViT equivalents."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
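

# Shape check for the split above: timm stores query, key and value stacked in a
# single (3*hidden, hidden) matrix, so slicing thirds recovers each projection.
_hidden = 4
_qkv = torch.zeros(3 * _hidden, _hidden)
assert _qkv[:_hidden, :].shape == (_hidden, _hidden)  # query
assert _qkv[_hidden : _hidden * 2, :].shape == (_hidden, _hidden)  # key
assert _qkv[-_hidden:, :].shape == (_hidden, _hidden)  # value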
def remove_classification_head_(state_dict):
    """Drop the linear classification head from the original checkpoint."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
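

# `rename_key` in action on a toy dict: the entry moves to its new name unchanged.
_toy = {"norm.weight": 1}
rename_key(_toy, "norm.weight", "layernorm.weight")
assert _toy == {"layernorm.weight": 1}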
def prepare_img():
    """Fetch the standard COCO cats test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak a DINO checkpoint's weights into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model) | 46 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 25 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Return the start below ``limit`` producing the longest Collatz chain (Project Euler 14)."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
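

# Worked example: chains are memoised, so e.g. counters[13] = 10 is reused by every
# start that falls onto 13. Below 14 the longest chain starts at 9 (20 terms).
assert solution(14) == 9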
if __name__ == "__main__":
print(solution(int(input().strip())))
| 47 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # fast tests are provided by the mixin
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 | 25 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(input_a, input_b):
    """Straight-line distance between two points."""
    return np.linalg.norm(np.array(input_a) - np.array(input_b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` by majority vote among its ``k`` nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
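

# Quick self-check of the distance helper above: a 3-4-5 right triangle.
assert euclidean_distance([0, 0], [3, 4]) == 5.0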
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 48 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
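

# The helpers above encode each operation as (callable, *args); applying one to a
# mapping is a plain call, e.g. `_set` mirrors `d[k] = v`:
_fun, _key, _val = _set("key", "value")
_toy_dict: dict = {}
_fun(_toy_dict, _key, _val)
assert _toy_dict == {"key": "value"}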
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_are_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names | 25 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
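

# Minimal usage sketch: a smaller, basic-block variant (illustrative sizes, not a
# released checkpoint).
_demo_config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
assert _demo_config.layer_type == "basic"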
| 49 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 25 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = ProphetNetTokenizer
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
super().setUp()
lowerCamelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """UNwant\u00E9d,running"""
lowerCamelCase__ = """unwanted, running"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file )
lowerCamelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_lowerCAmelCase ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[9, 6, 7, 12, 10, 11] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""hรคllo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""HรคLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowerCamelCase__ = {}
for i, token in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = i
lowerCamelCase__ = WordpieceTokenizer(vocab=_lowerCAmelCase ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
lowerCamelCase__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase__ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
lowerCamelCase__ = tokenizer(_lowerCAmelCase ,padding=_lowerCAmelCase ,return_tensors="""pt""" )
self.assertIsInstance(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
def UpperCamelCase_ ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase_ ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase_ ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 50 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Distribute values into per-integer-step buckets, then sort each bucket."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
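

# The bucket index is just the offset from the minimum, so one bucket exists per
# integer step between min and max. Quick check with floats and negatives:
assert bucket_sort([0.5, -0.5, 1.5]) == [-0.5, 0.5, 1.5]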
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15] | 25 | 0 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
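

# A minimal trace of the policy above (illustrative numbers): a burst of 53 under
# slices [17, 25] runs 17 in Q0 and 25 in Q1, leaving 11 for the final FCFS queue.
_remaining = 53
for _slice in [17, 25]:
    _remaining -= min(_slice, _remaining)
assert _remaining == 11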
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 51 |
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens']) | 25 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __A ( a_ :int , a_ :Union[str, Any]) -> int:
assert isinstance(a_ , a_)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True])
def __A ( a_ :str , a_ :Tuple , a_ :Optional[int]) -> Optional[int]:
__a : Union[str, Any] = tmp_path / '''cache'''
__a : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a : Optional[int] = JsonDatasetReader(a_ , cache_dir=a_ , keep_in_memory=a_).read()
_check_json_dataset(a_ , a_)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __A ( a_ :List[str] , a_ :List[str] , a_ :Any) -> Dict:
__a : Optional[int] = tmp_path / '''cache'''
__a : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__a : Tuple = features.copy() if features else default_expected_features
__a : Dict = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
__a : int = JsonDatasetReader(a_ , features=a_ , cache_dir=a_).read()
_check_json_dataset(a_ , a_)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def __A ( a_ :Any , a_ :List[Any] , a_ :Union[str, Any]) -> Optional[Any]:
__a : List[str] = tmp_path / '''cache'''
__a : Tuple = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__a : str = features.copy() if features else default_expected_features
__a : Optional[int] = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
__a : int = JsonDatasetReader(a_ , features=a_ , cache_dir=a_).read()
assert isinstance(a_ , a_)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __A ( a_ :Optional[Any] , a_ :List[str]) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__a : List[Any] = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__a : Dict = features.copy()
__a : List[Any] = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
__a : List[Any] = tmp_path / '''cache'''
__a : Optional[Any] = JsonDatasetReader(a_ , features=a_ , cache_dir=a_).read()
assert isinstance(a_ , a_)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train'''), '''train''', '''test'''])
def __A ( a_ :Tuple , a_ :Optional[int] , a_ :List[Any]) -> int:
__a : Tuple = tmp_path / '''cache'''
__a : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__a : List[str] = JsonDatasetReader(a_ , cache_dir=a_ , split=a_).read()
_check_json_dataset(a_ , a_)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list])
def __A ( a_ :Tuple , a_ :Dict , a_ :Any) -> Optional[int]:
if issubclass(a_ , a_):
__a : List[str] = jsonl_path
elif issubclass(a_ , a_):
__a : str = [jsonl_path]
__a : Optional[int] = tmp_path / '''cache'''
__a : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__a : Dict = JsonDatasetReader(a_ , cache_dir=a_).read()
_check_json_dataset(a_ , a_)
def __A ( a_ :int , a_ :Union[str, Any] , a_ :Any=("train",)) -> List[str]:
assert isinstance(a_ , a_)
for split in splits:
__a : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True])
def __A ( a_ :int , a_ :List[Any] , a_ :Any) -> Optional[Any]:
__a : str = tmp_path / '''cache'''
__a : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a : List[Any] = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=a_ , keep_in_memory=a_).read()
_check_json_datasetdict(a_ , a_)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __A ( a_ :Tuple , a_ :str , a_ :Dict) -> List[str]:
__a : List[str] = tmp_path / '''cache'''
__a : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__a : List[str] = features.copy() if features else default_expected_features
__a : Optional[Any] = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
__a : Optional[int] = JsonDatasetReader({'''train''': jsonl_path} , features=a_ , cache_dir=a_).read()
_check_json_datasetdict(a_ , a_)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train'''), '''train''', '''test'''])
def __A ( a_ :Dict , a_ :Union[str, Any] , a_ :Optional[Any]) -> Any:
if split:
__a : List[Any] = {split: jsonl_path}
else:
__a : int = '''train'''
__a : str = {'''train''': jsonl_path, '''test''': jsonl_path}
__a : int = tmp_path / '''cache'''
__a : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__a : int = JsonDatasetReader(a_ , cache_dir=a_).read()
_check_json_datasetdict(a_ , a_ , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def __A ( a_ :Tuple) -> Optional[Any]:
return json.load(a_)
def __A ( a_ :Optional[Any]) -> Any:
return [json.loads(a_) for line in buffer]
class __lowercase :
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase ).write()
buffer.seek(0 )
__a : Dict = load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase ).write()
buffer.seek(0 )
__a : int = load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
__a : Tuple = load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
__a : List[str] = load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
def _lowerCamelCase ( self , _UpperCAmelCase ):
with pytest.raises(_UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Any = tmp_path_factory.mktemp('''data''' ) / f"""test.json.{extension}"""
__a : int = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , compression=_UpperCAmelCase ).write()
with fsspec.open(_UpperCAmelCase , '''rb''' , compression='''infer''' ) as f:
__a : List[Any] = f.read()
with fsspec.open(_UpperCAmelCase , '''rb''' , compression='''infer''' ) as f:
__a : Optional[Any] = f.read()
assert exported_content == original_content | 52 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : str=None , a : List[Any]=None , **a : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
if isinstance(a , a ):
raise ValueError(
F"Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE : List[str] = device if isinstance(a , a ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need a global mapping instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
SCREAMING_SNAKE_CASE : Any = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Any = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(a , list ) and a:
            if all(
                isinstance(x , jax.Array ) and x.shape == a[0].shape and x.dtype == a[0].dtype for x in a ):
                return jnp.stack(a , axis=0 )
        return a
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(a , (str, bytes, type(None)) ):
            return a
        elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(a.dtype , np.character ):
            return a.tolist()
        default_dtype = {}
        if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(a.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(a.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need a global mapping instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , "__array__" ) and not isinstance(a , jax.Array ):
SCREAMING_SNAKE_CASE : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(a , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(a )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a , map_list=a )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
SCREAMING_SNAKE_CASE : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_arrow_extractor().extract_column(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : Tuple = self.recursive_tensorize(a )
SCREAMING_SNAKE_CASE : Optional[int] = self._consolidate(a )
return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_batch(a )
SCREAMING_SNAKE_CASE : str = self.python_features_decoder.decode_batch(a )
SCREAMING_SNAKE_CASE : List[Any] = self.recursive_tensorize(a )
for column_name in batch:
SCREAMING_SNAKE_CASE : List[Any] = self._consolidate(batch[column_name] )
return batch | 25 | 0 |
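# --- Editor's sketch (hedged): the class above appears to be the `datasets` jax formatter.
# End users normally reach it through the public `with_format` API rather than directly;
# a minimal usage sketch, assuming `datasets` and `jax` are installed:
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]      # a jax.Array instead of a Python list
#     ds["x"].shape   # equal-shaped rows are stacked by _consolidate, here (2, 2)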
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
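# Editor's illustration (hedged): pretty_print(3) composes floyd (1, 2, 3 stars)
# with reverse_floyd (3, 2, 1 stars), printing roughly:
#
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *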
| 53 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , ) | 25 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class A ( tf.keras.layers.Layer ):
def __init__( self: Optional[Any] , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int = None , _lowerCAmelCase: int = None ) -> List[Any]:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =pad_token_id
UpperCAmelCase_ =max_length
UpperCAmelCase_ =vocab
UpperCAmelCase_ =merges
UpperCAmelCase_ =BytePairTokenizer(_lowerCAmelCase , _lowerCAmelCase , sequence_length=_lowerCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls: Union[str, Any] , _lowerCAmelCase: GPTaTokenizer , *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[" ".join(_lowerCAmelCase ) for m in tokenizer.bpe_ranks.keys()]
UpperCAmelCase_ =tokenizer.get_vocab()
return cls(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls: Dict , _lowerCAmelCase: Union[str, os.PathLike] , *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =GPTaTokenizer.from_pretrained(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
return cls.from_tokenizer(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls: List[str] , _lowerCAmelCase: Optional[int] ) -> List[Any]:
'''simple docstring'''
return cls(**_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict ) -> int:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int = None ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.tf_tokenizer(_lowerCAmelCase )
UpperCAmelCase_ =tf.ones_like(_lowerCAmelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
UpperCAmelCase_ =max_length if max_length is not None else self.max_length
if max_length is not None:
UpperCAmelCase_ , UpperCAmelCase_ =pad_model_inputs(
_lowerCAmelCase , max_seq_length=_lowerCAmelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54 |
def lowerCamelCase__ ( number):
    if not isinstance(number , int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
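# Worked example: 76 is automorphic because 76**2 = 5776 ends in 76, while 7 is not
# because 7**2 = 49 does not end in 7.
#
#     lowerCamelCase__(76)   # True  (5776 % 100 == 76)
#     lowerCamelCase__(7)    # False (49 % 10 != 7)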
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] ,A : AutoencoderKL ,A : CLIPTextModel ,A : CLIPTokenizer ,A : UNetaDConditionModel ,A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,A : StableDiffusionSafetyChecker ,A : CLIPImageProcessor ,):
super().__init__()
self.register_modules(
vae=A ,text_encoder=A ,tokenizer=A ,unet=A ,scheduler=A ,safety_checker=A ,feature_extractor=A ,)
def UpperCamelCase_ ( self : Tuple ,A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCamelCase_ ( self : Tuple ):
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self : Dict ,A : Union[str, List[str]] ,A : int = 5_12 ,A : int = 5_12 ,A : int = 50 ,A : float = 7.5 ,A : Optional[Union[str, List[str]]] = None ,A : Optional[int] = 1 ,A : float = 0.0 ,A : Optional[torch.Generator] = None ,A : Optional[torch.FloatTensor] = None ,A : Optional[str] = "pil" ,A : bool = True ,A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,A : int = 1 ,A : Optional[torch.FloatTensor] = None ,**A : Optional[Any] ,):
if isinstance(A ,A ):
__A = 1
elif isinstance(A ,A ):
__A = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A ,A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__A = self.tokenizer(
A ,padding="max_length" ,max_length=self.tokenizer.model_max_length ,return_tensors="pt" ,)
__A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__A = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__A , __A , __A = text_embeddings.shape
__A = text_embeddings.repeat(1 ,A ,1 )
__A = text_embeddings.view(bs_embed * num_images_per_prompt ,A ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__A = 42
if negative_prompt is None:
__A = [""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A ,A ):
__A = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
__A = negative_prompt
__A = text_input_ids.shape[-1]
__A = self.tokenizer(
A ,padding="max_length" ,max_length=A ,truncation=A ,return_tensors="pt" ,)
__A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__A = uncond_embeddings.shape[1]
__A = uncond_embeddings.repeat(A ,A ,1 )
__A = uncond_embeddings.view(batch_size * num_images_per_prompt ,A ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__A = torch.randn(
A ,generator=A ,device="cpu" ,dtype=A ).to(self.device )
__A = torch.randn(A ,generator=A ,device="cpu" ,dtype=A ).to(
self.device )
else:
__A = torch.randn(
A ,generator=A ,device=self.device ,dtype=A )
__A = torch.randn(A ,generator=A ,device=self.device ,dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__A = latents_reference.to(self.device )
__A = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__A = (latents_shape[3] - latents_shape_reference[3]) // 2
__A = (latents_shape[2] - latents_shape_reference[2]) // 2
__A = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__A = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__A = 0 if dx < 0 else dx
__A = 0 if dy < 0 else dy
__A = max(-dx ,0 )
__A = max(-dy ,0 )
# import pdb
# pdb.set_trace()
__A = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__A = {}
if accepts_eta:
__A = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A = self.scheduler.scale_model_input(A ,A )
# predict the noise residual
__A = self.unet(A ,A ,encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__A , __A = noise_pred.chunk(2 )
__A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__A = self.scheduler.step(A ,A ,A ,**A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A ,A ,A )
__A = 1 / 0.1_82_15 * latents
__A = self.vae.decode(A ).sample
__A = (image / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__A = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if self.safety_checker is not None:
__A = self.feature_extractor(self.numpy_to_pil(A ) ,return_tensors="pt" ).to(
self.device )
__A , __A = self.safety_checker(
images=A ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__A = None
if output_type == "pil":
__A = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A ,nsfw_content_detected=A )
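# --- Editor's note (hedged): the guidance step in __call__ above is standard
# classifier-free guidance. With g = guidance_scale, e_u the unconditional prediction
# and e_t the text-conditioned prediction:
#
#     e = e_u + g * (e_t - e_u)
#
# g = 1 reduces to the conditional prediction; larger g extrapolates away from the
# unconditional one (e.g. e_u = 0.2, e_t = 0.5, g = 7.5 gives e = 0.2 + 7.5 * 0.3 = 2.45).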
| 55 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) ) | 25 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : Optional[Any] = logging.get_logger(__name__)
# General docstring
_a : Optional[Any] = "RegNetConfig"
# Base docstring
_a : int = "facebook/regnet-y-040"
_a : Tuple = [1, 1_088, 7, 7]
# Image classification docstring
_a : List[str] = "facebook/regnet-y-040"
_a : Any = "tabby, tabby cat"
_a : Tuple = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( nn.Module ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , ) -> List[str]:
super().__init__()
__snake_case = nn.Convad(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=kernel_size // 2 , groups=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ , )
__snake_case = nn.BatchNormad(SCREAMING_SNAKE_CASE_ )
__snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
__snake_case = self.convolution(SCREAMING_SNAKE_CASE_ )
__snake_case = self.normalization(SCREAMING_SNAKE_CASE_ )
__snake_case = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : RegNetConfig ) -> Dict:
super().__init__()
__snake_case = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__snake_case = config.num_channels
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
__snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
__snake_case = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 ) -> List[str]:
super().__init__()
__snake_case = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
__snake_case = nn.BatchNormad(SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Tensor:
__snake_case = self.convolution(SCREAMING_SNAKE_CASE_ )
__snake_case = self.normalization(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int:
super().__init__()
__snake_case = nn.AdaptiveAvgPoolad((1, 1) )
__snake_case = nn.Sequential(
nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ) , nn.Sigmoid() , )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]:
# b c h w -> b c 1 1
__snake_case = self.pooler(SCREAMING_SNAKE_CASE_ )
__snake_case = self.attention(SCREAMING_SNAKE_CASE_ )
__snake_case = hidden_state * attention
return hidden_state
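# --- Editor's note (hedged reading of the layer above): this is a squeeze-and-excitation
# block. The pooler collapses each channel map to one value ("squeeze"), the two 1x1
# convolutions form a bottleneck MLP whose sigmoid output gates each channel ("excitation"),
# and the final multiply rescales the input. Shape-wise, for x of shape (B, C, H, W):
#
#     pool(x)        -> (B, C, 1, 1)
#     attention      -> (B, C, 1, 1), values in (0, 1)
#     x * attention  -> (B, C, H, W), broadcast over H and W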
class _lowercase ( nn.Module ):
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Tuple:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width )
__snake_case = (
RegNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = ACTaFN[config.hidden_act]
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
__snake_case = hidden_state
__snake_case = self.layer(SCREAMING_SNAKE_CASE_ )
__snake_case = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
__snake_case = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Optional[Any]:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width )
__snake_case = (
RegNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) , RegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = ACTaFN[config.hidden_act]
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = hidden_state
__snake_case = self.layer(SCREAMING_SNAKE_CASE_ )
__snake_case = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
__snake_case = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , ) -> Any:
super().__init__()
__snake_case = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , ) , *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(depth - 1 )] , )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
__snake_case = self.layers(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : int , SCREAMING_SNAKE_CASE_ : RegNetConfig ) -> Union[str, Any]:
super().__init__()
__snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(__snake_case , config.depths[1:] ):
self.stages.append(RegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ ) )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> BaseModelOutputWithNoAttention:
__snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
__snake_case = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = RegNetConfig
_SCREAMING_SNAKE_CASE : Optional[int] = "regnet"
_SCREAMING_SNAKE_CASE : int = "pixel_values"
_SCREAMING_SNAKE_CASE : Optional[int] = True
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(SCREAMING_SNAKE_CASE_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=False ) -> Tuple:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = value
_a : Union[str, Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_a : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _lowercase ( __lowercase ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
super().__init__(SCREAMING_SNAKE_CASE_ )
__snake_case = config
__snake_case = RegNetEmbeddings(SCREAMING_SNAKE_CASE_ )
__snake_case = RegNetEncoder(SCREAMING_SNAKE_CASE_ )
__snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.embedder(SCREAMING_SNAKE_CASE_ )
__snake_case = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__snake_case = encoder_outputs[0]
__snake_case = self.pooler(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _lowercase ( __lowercase ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Any:
super().__init__(SCREAMING_SNAKE_CASE_ )
__snake_case = config.num_labels
__snake_case = RegNetModel(SCREAMING_SNAKE_CASE_ )
# classification head
__snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.regnet(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__snake_case = outputs.pooler_output if return_dict else outputs[1]
__snake_case = self.classifier(SCREAMING_SNAKE_CASE_ )
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = 'single_label_classification'
else:
__snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__snake_case = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
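# Usage sketch (editorial addition, not part of the original file): the class
# above mirrors transformers' RegNetForImageClassification, so under that
# public API - the checkpoint name below is illustrative - inference looks like:
if __name__ == "__main__":
    import torch
    from transformers import RegNetForImageClassification

    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    # A random tensor stands in for a preprocessed image batch here.
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values).logits
    print("predicted label:", model.config.id2label[logits.argmax(-1).item()])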
| 56 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class _lowerCAmelCase( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False , **_lowerCamelCase ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: Dict = vocab_size
UpperCamelCase_: Union[str, Any] = d_embed
UpperCamelCase_: List[Any] = d_proj
UpperCamelCase_: Tuple = cutoffs + [vocab_size]
UpperCamelCase_: Any = [0] + self.cutoffs
UpperCamelCase_: List[str] = div_val
UpperCamelCase_: Optional[int] = self.cutoffs[0]
UpperCamelCase_: Tuple = len(self.cutoffs ) - 1
UpperCamelCase_: Optional[Any] = self.shortlist_size + self.n_clusters
UpperCamelCase_: int = keep_order
UpperCamelCase_: Optional[Any] = []
UpperCamelCase_: int = []
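    # Note (editorial): this layer implements the adaptive softmax of Grave et
    # al. - frequent "shortlist" tokens share the head cluster, while rarer
    # tokens fall into tail clusters whose embedding width shrinks by a factor
    # of div_val per cluster, making the output projection much cheaper.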
def _a ( self , _lowerCamelCase ):
if self.n_clusters > 0:
UpperCamelCase_: str = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=_lowerCamelCase , name='cluster_weight' )
UpperCamelCase_: Any = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=_lowerCamelCase , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
UpperCamelCase_: List[Any] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_projs_._{i}''' , )
self.out_projs.append(_lowerCamelCase )
else:
self.out_projs.append(_lowerCamelCase )
UpperCamelCase_: List[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
UpperCamelCase_: Optional[Any] = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase_: Dict = self.d_embed // (self.div_val**i)
UpperCamelCase_: Any = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_projs_._{i}''' )
self.out_projs.append(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
UpperCamelCase_: int = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(_lowerCamelCase )
@staticmethod
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
UpperCamelCase_: List[str] = x
if proj is not None:
UpperCamelCase_: Tuple = tf.einsum('ibd,ed->ibe' , _lowerCamelCase , _lowerCamelCase )
return tf.einsum('ibd,nd->ibn' , _lowerCamelCase , _lowerCamelCase ) + b
@staticmethod
def _a ( _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = shape_list(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = tf.range(lp_size[0] , dtype=target.dtype )
UpperCamelCase_: Any = tf.stack([r, target] , 1 )
return tf.gather_nd(_lowerCamelCase , _lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=False ):
UpperCamelCase_: int = 0
if self.n_clusters == 0:
UpperCamelCase_: Union[str, Any] = self._logit(_lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
UpperCamelCase_: List[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_lowerCamelCase , logits=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = tf.nn.log_softmax(_lowerCamelCase , axis=-1 )
else:
UpperCamelCase_: Optional[Any] = shape_list(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Optional[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
UpperCamelCase_: List[Any] = (target >= l_idx) & (target < r_idx)
UpperCamelCase_: List[Any] = tf.where(_lowerCamelCase )
UpperCamelCase_: str = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase ) - l_idx
if self.div_val == 1:
UpperCamelCase_: Optional[Any] = self.out_layers[0][0][l_idx:r_idx]
UpperCamelCase_: List[str] = self.out_layers[0][1][l_idx:r_idx]
else:
UpperCamelCase_: Optional[int] = self.out_layers[i][0]
UpperCamelCase_: int = self.out_layers[i][1]
if i == 0:
UpperCamelCase_: str = tf.concat([cur_W, self.cluster_weight] , 0 )
UpperCamelCase_: Any = tf.concat([cur_b, self.cluster_bias] , 0 )
UpperCamelCase_: List[Any] = self._logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.out_projs[0] )
UpperCamelCase_: int = tf.nn.log_softmax(_lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
UpperCamelCase_: Any = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: List[Any] = self._gather_logprob(_lowerCamelCase , _lowerCamelCase )
else:
UpperCamelCase_: str = self._logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.out_projs[i] )
UpperCamelCase_: Optional[Any] = tf.nn.log_softmax(_lowerCamelCase )
UpperCamelCase_: List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
UpperCamelCase_: Dict = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_lowerCamelCase )
if target is not None:
UpperCamelCase_: List[Any] = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Optional[Any] = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: int = self._gather_logprob(_lowerCamelCase , _lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_lowerCamelCase , -cur_logprob , shape_list(_lowerCamelCase ) )
UpperCamelCase_: Optional[Any] = tf.concat(_lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
UpperCamelCase_: Any = tf.reduce_mean(_lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_lowerCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(_lowerCamelCase , name=self.name , aggregation='mean' if return_mean else '' )
return out | 57 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : str = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
        SCREAMING_SNAKE_CASE : List[Any] = UNet2DModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 25 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''bit'''
_lowerCamelCase = ['''preactivation''', '''bottleneck''']
_lowerCamelCase = ['''SAME''', '''VALID''']
def __init__( self , _lowercase=3 , _lowercase=6_4 , _lowercase=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _lowercase=[3, 4, 6, 3] , _lowercase="preactivation" , _lowercase="relu" , _lowercase=None , _lowercase=3_2 , _lowercase=0.0 , _lowercase=False , _lowercase=3_2 , _lowercase=1 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(**_lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case_ : Optional[Any] = global_padding.upper()
else:
raise ValueError(f'Padding strategy {global_padding} not supported' )
snake_case_ : str = num_channels
snake_case_ : Tuple = embedding_size
snake_case_ : Any = hidden_sizes
snake_case_ : int = depths
snake_case_ : Tuple = layer_type
snake_case_ : List[Any] = hidden_act
snake_case_ : int = global_padding
snake_case_ : List[Any] = num_groups
snake_case_ : Any = drop_path_rate
snake_case_ : Optional[int] = embedding_dynamic_padding
snake_case_ : int = output_stride
snake_case_ : Tuple = width_factor
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Tuple = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
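# Usage sketch (editorial addition; assumes the public transformers names
# BitConfig/BitModel for the class above - the keyword names follow that API,
# not the mangled signature in this dump):
if __name__ == "__main__":
    from transformers import BitConfig, BitModel

    config = BitConfig(layer_type="bottleneck", global_padding="SAME")
    model = BitModel(config)  # randomly initialised weights
    print(config.stage_names)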
| 58 |
def binary_multiply(a, b):
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a, b, c):
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res | 25 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav, max_length, sample_rate = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowercase_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Name of a dataset from the datasets package"} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "A file containing the training audio paths and labels."} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "A file containing the validation audio paths and labels."} )
lowercase_ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowercase_ = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowercase_ = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
lowercase_ = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowercase_ = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowercase_ = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
lowercase_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Name or path of preprocessor config."} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowercase_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , UpperCAmelCase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`.")
def main() -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , __a , __a )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__: Optional[Any] =training_args.get_process_log_level()
logger.setLevel(__a )
transformers.utils.logging.set_verbosity(__a )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowerCamelCase__: Any =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__: Union[str, Any] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
lowerCamelCase__: List[str] =DatasetDict()
lowerCamelCase__: List[str] =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__: str =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--label_column_name` to the correct text column - one of "
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowerCamelCase__: Union[str, Any] =AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowerCamelCase__: List[str] =raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowerCamelCase__: int =feature_extractor.model_input_names[0]
def train_transforms(__a ):
lowerCamelCase__: List[str] =[]
for audio in batch[data_args.audio_column_name]:
lowerCamelCase__: str =random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__a )
lowerCamelCase__: Dict =feature_extractor(__a , sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase__: Tuple ={model_input_name: inputs.get(__a )}
lowerCamelCase__: Any =list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__a ):
lowerCamelCase__: Dict =[audio["array"] for audio in batch[data_args.audio_column_name]]
lowerCamelCase__: List[Any] =feature_extractor(__a , sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase__: Optional[Any] ={model_input_name: inputs.get(__a )}
lowerCamelCase__: Union[str, Any] =list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCamelCase__: Union[str, Any] =raw_datasets["train"].features[data_args.label_column_name].names
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] ={}, {}
for i, label in enumerate(__a ):
lowerCamelCase__: Tuple =str(__a )
lowerCamelCase__: Optional[Any] =label
# Load the accuracy metric from the datasets package
lowerCamelCase__: List[Any] =evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__a ):
lowerCamelCase__: List[Any] =np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__a , references=eval_pred.label_ids )
lowerCamelCase__: Dict =AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(__a ) , label2id=__a , id2label=__a , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__: Any =AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase__: List[str] =(
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__a , output_all_columns=__a )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase__: Union[str, Any] =(
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__a , output_all_columns=__a )
# Initialize our trainer
lowerCamelCase__: List[str] =Trainer(
model=__a , args=__a , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=__a , tokenizer=__a , )
# Training
if training_args.do_train:
lowerCamelCase__: Union[str, Any] =None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__: Optional[Any] =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__: Tuple =last_checkpoint
lowerCamelCase__: Any =trainer.train(resume_from_checkpoint=__a )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase__: str =trainer.evaluate()
trainer.log_metrics("eval" , __a )
trainer.save_metrics("eval" , __a )
# Write model card and (optionally) push to hub
lowerCamelCase__: List[str] ={
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__a )
else:
trainer.create_model_card(**__a )
if __name__ == "__main__":
main()
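# Example invocation (editorial; the dataset and model names are illustrative
# and follow the keyword-spotting recipe from the transformers examples):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb \
#       --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --learning_rate 3e-5 \
#       --max_length_seconds 1 \
#       --num_train_epochs 5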
| 59 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='roformer'
def __init__( self : Dict , a : Any=5_0000 , a : List[Any]=None , a : str=768 , a : str=12 , a : Tuple=12 , a : Optional[Any]=3072 , a : List[str]="gelu" , a : List[Any]=0.1 , a : Union[str, Any]=0.1 , a : Tuple=1536 , a : List[str]=2 , a : Tuple=0.02 , a : Any=1e-12 , a : Optional[int]=0 , a : Union[str, Any]=False , a : int=True , **a : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = rotary_value
SCREAMING_SNAKE_CASE : int = use_cache
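# Note (editorial): RoFormer replaces absolute position embeddings with rotary
# position embeddings (RoPE), which rotate query/key vectors by a position-
# dependent angle; the `rotary_value` flag above additionally applies the same
# rotation to the value projections.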
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "sequence"}
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] ) | 25 | 0 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the first couple of digits of the card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Validate the given credit card number with the Luhn algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two-digit number, i.e. greater than 9
        # (e.g., 6 × 2 = 12), add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single digit;
        # digit % 10 + 1 is equivalent to subtracting 9 from the doubled value.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number."""
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False
    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False
    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False
    print(f'{credit_card_number} is a valid credit card number.')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
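    # Worked example (editorial): for 4111111111111111, doubling every second
    # digit from the right turns seven 1s into 2s and the leading 4 into 8
    # (22 in total); the untouched eight 1s add 8, so the checksum is 30 and
    # 30 % 10 == 0 - the Luhn check passes.
    assert luhn_validation("4111111111111111")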
| 60 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
a_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : List[Any] = BertAbsConfig(
temp_dir="." , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE : Dict = torch.load(_a , lambda _a , _a: storage)
SCREAMING_SNAKE_CASE : str = AbsSummarizer(_a , torch.device("cpu") , _a)
original.eval()
SCREAMING_SNAKE_CASE : List[str] = BertAbsSummarizer(_a , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained("bert-base-uncased")
# prepare the model inputs
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("This is sample รฉร alj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("This is sample 3 รฉร alj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[Any] = encoder_input_ids
SCREAMING_SNAKE_CASE : List[Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
    # The original model does not apply the generator layer immediately but
    # rather in the beam search (where it combines softmax + linear layer).
    # Since we already apply the softmax in our generation process, we only
    # apply the linear layer here. We make sure that the outputs of the full
    # stack are identical.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : Optional[int] = original(_a , _a , _a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Dict = original.generator(_a)
SCREAMING_SNAKE_CASE : Any = new_model(
_a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Tuple = new_model.generator(_a)
SCREAMING_SNAKE_CASE : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : int = torch.allclose(_a , _a , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
a_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
) | 25 | 0 |
from math import pi, sqrt, tan
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
lowerCAmelCase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(lowerCAmelCase_ , 2 ) * torus_radius * tube_radius
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
lowerCAmelCase__ = (sidea + sidea + sidea) / 2
lowerCAmelCase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def _A ( lowerCAmelCase_ : int , lowerCAmelCase_ : float ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 61 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
a_ = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path) | 25 | 0 |
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = set()
# edges = list of graph's edges
SCREAMING_SNAKE_CASE : List[Any] = get_edges(lowercase )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = edges.pop()
chosen_vertices.add(lowercase )
chosen_vertices.add(lowercase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase )
return chosen_vertices
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 62 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase__ ( __lowerCamelCase : int ):
__UpperCAmelCase : Optional[Any] = prime_factors(__lowerCamelCase )
if is_square_free(__lowerCamelCase ):
return -1 if len(__lowerCamelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
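    # Worked examples (editorial; assuming the function above is exported as
    # `mobius`): mobius(6) = 1 (square free, even number of prime factors),
    # mobius(12) = 0 (divisible by 2**2, so not square free), and
    # mobius(30) = -1 (square free, odd number of prime factors).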
| 63 |
from math import pi, sqrt, tan
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values")
return 6 * side_length**2
def lowerCamelCase__ ( _a , _a , _a):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values")
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values")
return 4 * pi * radius**2
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values")
return 3 * pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values")
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase__ ( _a , _a , _a):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values")
SCREAMING_SNAKE_CASE : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values")
return 2 * pi * radius * (height + radius)
def lowerCamelCase__ ( _a , _a):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values")
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori")
return 4 * pow(_a , 2) * torus_radius * tube_radius
def lowerCamelCase__ ( _a , _a):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values")
return length * width
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values")
return side_length**2
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values")
return (base * height) / 2
def lowerCamelCase__ ( _a , _a , _a):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values")
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle")
SCREAMING_SNAKE_CASE : List[str] = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : Optional[int] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea))
return area
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values")
return base * height
def lowerCamelCase__ ( _a , _a , _a):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values")
return 1 / 2 * (basea + basea) * height
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values")
return pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values")
return pi * radius_x * radius_y
def lowerCamelCase__ ( _a , _a):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values")
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase__ ( _a , _a):
if not isinstance(_a , _a) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''') | 25 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = XGLMTokenizer
__a = XGLMTokenizerFast
__a = True
__a = True
def UpperCamelCase_ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__: Union[str, Any]= XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Tuple= '''<pad>'''
SCREAMING_SNAKE_CASE__: Union[str, Any]= 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(lowerCAmelCase ) , 1008 )
def UpperCamelCase_ ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[int]= XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        SCREAMING_SNAKE_CASE__: int= tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
                '''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE__: str= tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def UpperCamelCase_ ( self ) -> List[Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name )
SCREAMING_SNAKE_CASE__: str= XGLMTokenizer(f.name , keep_accents=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= pickle.dumps(lowerCAmelCase )
pickle.loads(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__: List[Any]= self.get_tokenizer()
SCREAMING_SNAKE_CASE__: List[Any]= self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__: List[str]= '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__: Tuple= tokenizer.tokenize(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__: int= tokenizer.encode(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Tuple= '''Hello World!'''
SCREAMING_SNAKE_CASE__: Optional[int]= [2, 31227, 4447, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
SCREAMING_SNAKE_CASE__: Dict= [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def UpperCamelCase_ ( self ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__: Any= {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='''facebook/xglm-564M''' , padding=lowerCAmelCase , )
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __lowercase ( BeitImageProcessor ):
def __init__( self : Optional[Any] ,*A : Tuple ,**A : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" ,A ,)
super().__init__(*A ,**A )
| 65 |
from __future__ import annotations
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
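# A minimal usage sketch (assuming the placeholder assignments above bind i = 2
# and factors = [], with the argument bound to n): trial division emits prime
# factors in non-decreasing order, e.g. 360 -> [2, 2, 2, 3, 3, 5]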
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 0 |
from __future__ import annotations
UpperCamelCase = list[list[int]]
# assigning initial values to the grid
UpperCamelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCamelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
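# a digit n is safe at (row, column) only if it does not already appear in that
# row, that column, or the enclosing 3x3 box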
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Matrix | None:
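# classic backtracking: fill the first empty cell with each legal digit, recurse,
# and reset the cell to 0 when a branch dead-ends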
if location := find_empty_location(SCREAMING_SNAKE_CASE ):
_lowercase , _lowercase : Any = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = digit
if sudoku(SCREAMING_SNAKE_CASE ) is not None:
return grid
_lowercase : List[Any] = 0
return None
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
for row in grid:
for cell in row:
print(SCREAMING_SNAKE_CASE , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCamelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 66 |
from math import factorial, pi
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_sin() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : int = float(_a)
SCREAMING_SNAKE_CASE : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(_a))
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_cos() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : str = float(_a)
SCREAMING_SNAKE_CASE : Any = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(_a))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15)) | 25 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,__A : Dict ,__A : List[Any]=7 ,__A : Dict=3 ,__A : Tuple=30 ,__A : Dict=400 ,__A : Any=True ,__A : List[Any]=None ,__A : Any=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Union[str, Any]=[0.5, 0.5, 0.5] ,__A : int=True ,__A : List[str]=1 / 255 ,__A : Union[str, Any]=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size
_lowercase = do_normalize
_lowercase = image_mean
_lowercase = image_std
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_pad
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ,__A : List[str]=False ) -> Union[str, Any]:
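# mirrors the processor's shortest-edge resize: the smaller image side is scaled
# to size["shortest_edge"]; for batched inputs the expected shape is the per-batch
# maximum height and width, matching the pad-to-largest behaviour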
if not batched:
_lowercase = image_inputs[0]
if isinstance(__A ,Image.Image ):
_lowercase , _lowercase = image.size
else:
_lowercase , _lowercase = image.shape[1], image.shape[2]
if w < h:
_lowercase = int(self.size['shortest_edge'] * h / w )
_lowercase = self.size['shortest_edge']
elif w > h:
_lowercase = self.size['shortest_edge']
_lowercase = int(self.size['shortest_edge'] * w / h )
else:
_lowercase = self.size['shortest_edge']
_lowercase = self.size['shortest_edge']
else:
_lowercase = []
for image in image_inputs:
_lowercase , _lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowercase = max(__A ,key=lambda __A : item[0] )[0]
_lowercase = max(__A ,key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 39_769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 |
from __future__ import annotations
import math
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = size
# approximate the overall size of segment tree with given value
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
SCREAMING_SNAKE_CASE : Union[str, Any] = [0 for i in range(0 , 4 * size )]
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def __UpperCamelCase ( self : Tuple , a : int ) -> int:
"""simple docstring"""
return idx * 2
def __UpperCamelCase ( self : str , a : int ) -> int:
"""simple docstring"""
return idx * 2 + 1
def __UpperCamelCase ( self : int , a : int , a : int , a : int , a : list[int] ) -> None:
"""simple docstring"""
if left_element == right_element:
SCREAMING_SNAKE_CASE : int = a[left_element - 1]
else:
SCREAMING_SNAKE_CASE : Optional[int] = (left_element + right_element) // 2
self.build(self.left(a ) , a , a , a )
self.build(self.right(a ) , mid + 1 , a , a )
SCREAMING_SNAKE_CASE : List[Any] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : int , a : int , a : int , a : int , a : int ) -> bool:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : Any = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[str] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
SCREAMING_SNAKE_CASE : Optional[Any] = val
if left_element != right_element:
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
return True
SCREAMING_SNAKE_CASE : int = (left_element + right_element) // 2
self.update(self.left(a ) , a , a , a , a , a )
self.update(self.right(a ) , mid + 1 , a , a , a , a )
SCREAMING_SNAKE_CASE : Optional[int] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
return True
def __UpperCamelCase ( self : Dict , a : int , a : int , a : int , a : int , a : int ) -> int | float:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
SCREAMING_SNAKE_CASE : Dict = (left_element + right_element) // 2
SCREAMING_SNAKE_CASE : Tuple = self.query(self.left(a ) , a , a , a , a )
SCREAMING_SNAKE_CASE : Tuple = self.query(self.right(a ) , mid + 1 , a , a , a )
return max(a , a )
def __str__( self : str ) -> str:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , a , a ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
a_ = 15
a_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt) | 25 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( A_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
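# trust policy: allows the SageMaker service principal to assume this role; the
# permission policy attached below grants the training job access to ECR,
# CloudWatch, logs and S3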
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( A_: Dict ) -> Any:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =_ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , )
__UpperCAmelCase =None
if credentials_configuration == 0:
__UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__UpperCAmelCase =aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__UpperCAmelCase =_ask_field("""AWS Access Key ID: """ )
__UpperCAmelCase =aws_access_key_id
__UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ )
__UpperCAmelCase =aws_secret_access_key
__UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__UpperCAmelCase =aws_region
__UpperCAmelCase =_ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , )
if role_management == 0:
__UpperCAmelCase =_ask_field("""Enter your IAM role name: """ )
else:
__UpperCAmelCase ="""accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__UpperCAmelCase =_ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_custom_docker_image:
__UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() )
__UpperCAmelCase =_ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase ={}
__UpperCAmelCase =_ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__UpperCAmelCase ="""dynamo_"""
__UpperCAmelCase =_ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase =_ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__UpperCAmelCase =_ask_options(
"""Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , )
__UpperCAmelCase =_ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase ="""Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase =_ask_options(
A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" )
__UpperCAmelCase =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase =_ask_field(
"""How many machines do you want use? [1]: """ , A_ , default=1 , )
__UpperCAmelCase =_ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
| 68 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : Optional[int] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 25 | 0 |
'''simple docstring'''
from collections.abc import Callable
def __UpperCAmelCase ( _UpperCAmelCase : Callable[[float], float] , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
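# precondition for bisection: f(a) and f(b) must have opposite signs so the
# interval [a, b] is guaranteed to bracket a root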
__snake_case = a
__snake_case = b
if function(_UpperCAmelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCAmelCase ) == 0:
return b
elif (
function(_UpperCAmelCase ) * function(_UpperCAmelCase ) > 0
): # if neither endpoint is a root and f(a) and f(b) share the same sign,
# then bisection cannot bracket a root in [a, b]
raise ValueError("could not find root in given interval." )
else:
__snake_case = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # iterate until the interval is narrower than 1e-7
if function(_UpperCAmelCase ) == 0:
return mid
elif function(_UpperCAmelCase ) * function(_UpperCAmelCase ) < 0:
__snake_case = mid
else:
__snake_case = mid
__snake_case = start + (end - start) / 2.0
return mid
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 69 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 | 25 | 0 |
def _SCREAMING_SNAKE_CASE ( lowercase : int = 10 ):
'''simple docstring'''
if not isinstance(lowercase , lowercase ) or n < 0:
raise ValueError('Invalid input' )
lowerCamelCase_ = 10**n
lowerCamelCase_ = 28_433 * pow(2 , 7_830_457 , lowercase ) + 1
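# three-argument pow performs modular exponentiation in O(log exponent)
# multiplications, so the huge value 2**7830457 is never materialised; this is
# Project Euler problem 97 (the last n digits of 28433 * 2**7830457 + 1)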
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 70 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( _a):
return getitem, k
def lowerCamelCase__ ( _a , _a):
return setitem, k, v
def lowerCamelCase__ ( _a):
return delitem, k
def lowerCamelCase__ ( _a , _a , *_a):
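# run ``fun`` and capture either its result or the exception it raised, so dict
# and HashMap behaviour (including failure modes) can be compared case by case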
try:
return fun(_a , *_a), None
except Exception as e:
return None, e
a_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
a_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
a_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
a_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Dict = HashMap(initial_block_size=4)
SCREAMING_SNAKE_CASE : List[str] = {}
for _, (fun, *args) in enumerate(_a):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = _run_operation(_a , _a , *_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = _run_operation(_a , _a , *_a)
assert my_res == py_res
assert str(_a) == str(_a)
assert set(_a) == set(_a)
assert len(_a) == len(_a)
assert set(my.items()) == set(py.items())
def lowerCamelCase__ ( ):
def is_public(_a) -> bool:
return not name.startswith("_")
SCREAMING_SNAKE_CASE : List[str] = {name for name in dir({}) if is_public(_a)}
SCREAMING_SNAKE_CASE : Union[str, Any] = {name for name in dir(HashMap()) if is_public(_a)}
assert dict_public_names > hash_public_names | 25 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
_lowerCamelCase = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Any=None ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = True
while ask_again:
UpperCAmelCase_ : Any = input(_SCREAMING_SNAKE_CASE )
try:
if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0:
return default
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str=[] , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=0 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = menu.run(default_choice=_SCREAMING_SNAKE_CASE )
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = int(_SCREAMING_SNAKE_CASE )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = int(_SCREAMING_SNAKE_CASE )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = int(_SCREAMING_SNAKE_CASE )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Any = int(_SCREAMING_SNAKE_CASE )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = int(_SCREAMING_SNAKE_CASE )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class _snake_case (argparse.RawDescriptionHelpFormatter):
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = super()._format_usage(_snake_case ,_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : int = usage.replace("<command> [<args>] " ,"" )
return usage
| 71 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 0 |
'''simple docstring'''
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 72 |
from __future__ import annotations
def lowerCamelCase__ ( _a):
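# one bucket per unit offset from the minimum value; sorting each small bucket
# and concatenating them yields the fully sorted list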
if len(_a) == 0:
return []
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = min(_a), max(_a)
SCREAMING_SNAKE_CASE : Dict = int(max_value - min_value) + 1
SCREAMING_SNAKE_CASE : list[list] = [[] for _ in range(_a)]
for i in my_list:
buckets[int(i - min_value)].append(_a)
return [v for bucket in buckets for v in sorted(_a)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15] | 25 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
class _snake_case ( PretrainedConfig ):
_lowercase : int = '''encoder-decoder'''
_lowercase : Dict = True
def __init__( self , **a) -> List[Any]:
super().__init__(**a)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE = kwargs.pop('encoder')
SCREAMING_SNAKE_CASE = encoder_config.pop('model_type')
SCREAMING_SNAKE_CASE = kwargs.pop('decoder')
SCREAMING_SNAKE_CASE = decoder_config.pop('model_type')
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE = AutoConfig.for_model(a , **a)
SCREAMING_SNAKE_CASE = AutoConfig.for_model(a , **a)
SCREAMING_SNAKE_CASE = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , a , a , **a) -> PretrainedConfig:
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE = self.encoder.to_dict()
SCREAMING_SNAKE_CASE = self.decoder.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
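# A minimal usage sketch (assuming the obfuscated ``_snake_case`` class above is
# ``EncoderDecoderConfig`` and the classmethod is ``from_encoder_decoder_configs``):
#
#     from transformers import AutoConfig
#     enc = AutoConfig.from_pretrained("bert-base-uncased")
#     dec = AutoConfig.from_pretrained("bert-base-uncased")
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#     # config.decoder.is_decoder and config.decoder.add_cross_attention are now True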
| 73 |
a_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset([])
a_ = frozenset(['image'])
a_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image'])
a_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'negative_prompt'])
a_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
a_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image', 'mask_image'])
a_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['example_image', 'image', 'mask_image'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset(['input_tokens'])
a_ = frozenset(['input_tokens']) | 25 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ = get_logger()
a_ = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : str=None , a : List[Any]=None , **a : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
if isinstance(a , a ):
raise ValueError(
F"Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE : List[str] = device if isinstance(a , a ) else str(jax.devices()[0] )
# using a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : str = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
SCREAMING_SNAKE_CASE : Any = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Any = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
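# stack equally-shaped, same-dtype column entries into a single array; ragged
# columns are returned unchanged as a Python list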
if isinstance(a , a ) and column:
if all(
isinstance(a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a , axis=0 )
return column
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a , (str, bytes, type(a )) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
SCREAMING_SNAKE_CASE : Dict = {"dtype": jnp.intaa}
else:
SCREAMING_SNAKE_CASE : str = {"dtype": jnp.intaa}
elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE : int = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
# using a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , "__array__" ) and not isinstance(a , jax.Array ):
SCREAMING_SNAKE_CASE : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a , map_list=a )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
SCREAMING_SNAKE_CASE : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_arrow_extractor().extract_column(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : Tuple = self.recursive_tensorize(a )
SCREAMING_SNAKE_CASE : Optional[int] = self._consolidate(a )
return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_batch(a )
SCREAMING_SNAKE_CASE : str = self.python_features_decoder.decode_batch(a )
SCREAMING_SNAKE_CASE : List[Any] = self.recursive_tensorize(a )
for column_name in batch:
SCREAMING_SNAKE_CASE : List[Any] = self._consolidate(batch[column_name] )
        return batch
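# Added usage sketch (not part of the original file). Assumption: the class
# above is the formatter behind the public "jax" format in `datasets`, so it
# is normally reached through `Dataset.with_format` rather than directly:
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#     ds[0]["x"]   # a jnp array of shape (2,)
#     ds["x"]      # rows consolidated via jnp.stack -> shape (2, 2)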
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def a__ ( lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : str = None
# source code of `config_class`
UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
UpperCAmelCase__ : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase__ : Any = ckpt_name
break
return checkpoint
def a__ ( ) -> Dict:
UpperCAmelCase__ : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
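# Added note (not part of the original script). The compiled pattern above is
# referenced in the function body as `_re_checkpoint`; on a docstring fragment
# such as "[bert-base-uncased](https://huggingface.co/bert-base-uncased)", its
# `findall` yields [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")],
# i.e. one (checkpoint name, checkpoint link) tuple per match.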
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] , )
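# Added usage sketch (not part of the test file): the same pipeline API the
# tests above exercise, in plain form.
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9985)
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]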
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
a_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase ), f"""{len(__UpperCamelCase )} != {len(__UpperCamelCase )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
a_ = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
a_ = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
try:
__lowercase : List[str] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(__UpperCamelCase ) )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__UpperCamelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase = "student" , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
__lowercase : int = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__UpperCamelCase , __UpperCamelCase ):
AutoTokenizer.from_pretrained(__UpperCamelCase ).save_pretrained(__UpperCamelCase ) # purely for convenience
__lowercase : str = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase ).eval()
else:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), f"""teacher must be a model or string got type {type(__UpperCamelCase )}"""
__lowercase : Tuple = teacher.config.to_diff_dict()
try:
__lowercase ,__lowercase : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowercase : Union[str, Any] = teacher_e
if d is None:
__lowercase : Tuple = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__lowercase ,__lowercase : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowercase ,__lowercase : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowercase : List[str] = teacher_e
if d is None:
__lowercase : Dict = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__UpperCamelCase )
# Copy weights
__lowercase : Union[str, Any] = teacher.config_class(**__UpperCamelCase )
__lowercase : Optional[int] = AutoModelForSeqaSeqLM.from_config(__UpperCamelCase )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
__lowercase : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=__UpperCamelCase )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowercase ,__lowercase : List[Any] = list(range(__UpperCamelCase ) ), list(range(__UpperCamelCase ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(__UpperCamelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowercase : List[int] = pick_layers_to_copy(__UpperCamelCase , __UpperCamelCase )
if d_layers_to_copy is None:
__lowercase : List[int] = pick_layers_to_copy(__UpperCamelCase , __UpperCamelCase )
try:
if hasattr(
__UpperCamelCase , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __UpperCamelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __UpperCamelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __UpperCamelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __UpperCamelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __UpperCamelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __UpperCamelCase )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__lowercase : Dict = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__UpperCamelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
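# Added usage sketch (not part of the original script). `fire.Fire` exposes the
# function above as a CLI; its body references `teacher`, `save_path`, `e`, and
# `d`, so a plausible invocation (script and flag names assumed) is:
#     python make_student.py facebook/bart-large-cnn ./student --e 6 --d 3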
def lowerCamelCase__ ( number):
    # automorphic-number check: True iff number * number ends in the digits of number
    if not isinstance(number , int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
    doctest.testmod()
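# Added sketch (not part of the original file): a few automorphic and
# non-automorphic numbers; `lowerCamelCase__` is the checker defined above.
if __name__ == "__main__":
    assert lowerCamelCase__(5)      # 5**2 = 25 ends in 5
    assert lowerCamelCase__(76)     # 76**2 = 5776 ends in 76
    assert not lowerCamelCase__(7)  # 7**2 = 49 does not end in 7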
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a__ ( nn.Module ):
def __init__( self : Any , UpperCamelCase_ : int = 16 , UpperCamelCase_ : int = 88 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : str = "geglu" , UpperCamelCase_ : Optional[int] = None , ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : str = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCamelCase_ , attention_head_dim=UpperCamelCase_ , in_channels=UpperCamelCase_ , num_layers=UpperCamelCase_ , dropout=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , cross_attention_dim=UpperCamelCase_ , attention_bias=UpperCamelCase_ , sample_size=UpperCamelCase_ , num_vector_embeds=UpperCamelCase_ , activation_fn=UpperCamelCase_ , num_embeds_ada_norm=UpperCamelCase_ , )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__UpperCAmelCase : Tuple = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__UpperCAmelCase : Union[str, Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__UpperCAmelCase : Tuple = [1, 0]
def a_ ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : bool = True , ):
"""simple docstring"""
__UpperCAmelCase : Any = hidden_states
__UpperCAmelCase : str = []
__UpperCAmelCase : int = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
__UpperCAmelCase : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__UpperCAmelCase : Optional[Any] = self.transformer_index_for_condition[i]
__UpperCAmelCase : Dict = self.transformers[transformer_index](
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ , cross_attention_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
__UpperCAmelCase : Union[str, Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__UpperCAmelCase : Optional[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCamelCase_)
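# Added note (not part of the original file): writing the two transformer
# outputs as h1 and h2 and the input as x, the forward pass above computes
#     out = mix * (h1 - x) + (1 - mix) * (h2 - x) + x
# so with the default `mix_ratio = 0.5` the output is the plain average of the
# two transformer outputs.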
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( UpperCamelCase__ ):
def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = 1
UpperCAmelCase_ = FrozenDict(__a )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = True
UpperCAmelCase_ = FrozenDict(__a )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def _lowercase (self : int ):
        self.enable_attention_slicing(None )
def _lowercase (self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase (self : Optional[int] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ):
UpperCAmelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCAmelCase_ = self.segmentation_model(**__a )
UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
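# Added usage sketch (not part of the original file; model choice and prompts
# are examples). The pipeline segments `image` with CLIPSeg according to
# `text`, then inpaints the masked region guided by `prompt`:
#     result = pipe(prompt="a red couch", image=init_image, text="the couch")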
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
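# Added note (not part of the original file): replacing this module in
# `sys.modules` with a `_LazyModule` defers the heavy torch/sentencepiece
# imports until an attribute such as `PLBartModel` is first accessed.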
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 42
class UpperCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
@register_to_config
def __init__( self , _lowerCAmelCase = 32 , _lowerCAmelCase = 64 , _lowerCAmelCase = 20 , _lowerCAmelCase = 768 , _lowerCAmelCase=77 , _lowerCAmelCase=4 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = "silu" , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "linear" , _lowerCAmelCase = "prd" , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Optional[Any] = attention_head_dim
UpperCAmelCase__ : List[str] = num_attention_heads * attention_head_dim
UpperCAmelCase__ : List[str] = additional_embeddings
UpperCAmelCase__ : Tuple = time_embed_dim or inner_dim
UpperCAmelCase__ : int = embedding_proj_dim or embedding_dim
UpperCAmelCase__ : Tuple = clip_embed_dim or embedding_dim
UpperCAmelCase__ : Tuple = Timesteps(_lowerCAmelCase , _lowerCAmelCase , 0 )
UpperCAmelCase__ : int = TimestepEmbedding(_lowerCAmelCase , _lowerCAmelCase , out_dim=_lowerCAmelCase , act_fn=_lowerCAmelCase )
UpperCAmelCase__ : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase__ : Optional[int] = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase__ : str = nn.LayerNorm(_lowerCAmelCase )
else:
raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
UpperCAmelCase__ : List[Any] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase__ : str = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase__ : List[Any] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
else:
raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
UpperCAmelCase__ : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase__ : Tuple = nn.Parameter(torch.zeros(1 , 1 , _lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase__ : Any = None
else:
raise ValueError(
f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
UpperCAmelCase__ : Dict = nn.ModuleList(
[
BasicTransformerBlock(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dropout=_lowerCAmelCase , activation_fn="""gelu""" , attention_bias=_lowerCAmelCase , )
for d in range(_lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase__ : Optional[Any] = nn.LayerNorm(_lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase__ : Union[str, Any] = None
else:
raise ValueError(f"Unsupported norm_in_type: {norm_in_type}." )
UpperCAmelCase__ : Optional[int] = nn.LayerNorm(_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : str = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase__ : List[str] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _lowerCAmelCase , persistent=_lowerCAmelCase )
UpperCAmelCase__ : Tuple = nn.Parameter(torch.zeros(1 , _lowerCAmelCase ) )
UpperCAmelCase__ : List[Any] = nn.Parameter(torch.zeros(1 , _lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = {}
def fn_recursive_add_processors(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if hasattr(_lowerCAmelCase , """set_processor""" ):
UpperCAmelCase__ : int = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _lowerCAmelCase , _lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return processors
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_lowerCAmelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if hasattr(_lowerCAmelCase , """set_processor""" ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
module.set_processor(_lowerCAmelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _lowerCAmelCase , _lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
self.set_attn_processor(AttnProcessor() )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
UpperCAmelCase__ : List[str] = hidden_states.shape[0]
UpperCAmelCase__ : List[str] = timestep
if not torch.is_tensor(_lowerCAmelCase ):
UpperCAmelCase__ : Tuple = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase__ : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase__ : Tuple = timesteps * torch.ones(_lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase__ : Tuple = self.time_proj(_lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase__ : Dict = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase__ : Dict = self.time_embedding(_lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase__ : List[str] = self.embedding_proj_norm(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = self.embedding_proj(_lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase__ : str = self.encoder_hidden_states_proj(_lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
UpperCAmelCase__ : List[Any] = self.proj_in(_lowerCAmelCase )
UpperCAmelCase__ : Dict = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Any = 0
if encoder_hidden_states is not None:
additional_embeds.append(_lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase__ : List[str] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase__ : Tuple = hidden_states[:, None, :]
UpperCAmelCase__ : Tuple = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase__ : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_lowerCAmelCase , -1 , -1 )
additional_embeds.append(_lowerCAmelCase )
UpperCAmelCase__ : Dict = torch.cat(
_lowerCAmelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase__ : int = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase__ : Union[str, Any] = F.pad(
_lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase__ : List[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase__ : str = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
UpperCAmelCase__ : Any = F.pad(_lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase__ : Dict = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase__ : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase__ : int = self.norm_in(_lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase__ : List[Any] = block(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
UpperCAmelCase__ : int = self.norm_out(_lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase__ : Dict = hidden_states[:, -1]
else:
UpperCAmelCase__ : Optional[Any] = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase__ : str = self.proj_to_clip_embeddings(_lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : int = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
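# Added note (not part of the original file): the final method above undoes the
# CLIP-stat normalization, mapping prior latents back to the CLIP image
# embedding space via `latents * clip_std + clip_mean`.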
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from math import isclose, sqrt
def next_point ( point_x , point_y , incoming_gradient ):
    '''simple docstring'''
    normal_gradient = point_y / 4 / point_x
    # sa and ca are sin(2*theta) and cos(2*theta) for the normal's angle theta,
    # used to reflect the incoming gradient about the normal
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution ( first_x_coord = 1.4 , first_y_coord = -9.6 ):
    '''simple docstring'''
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
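# Added note (not part of the original file): this is Project Euler problem 144;
# the beam bounces inside the ellipse 4x^2 + y^2 = 100 and `solution()` counts
# reflections until the beam exits through the gap -0.01 <= x <= 0.01 at the top.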
def lowerCamelCase__ ( a , b):
    # binary (double-and-add) multiplication: add `a` once per set bit of `b`
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def lowerCamelCase_mod__ ( a , b , c):
    # modular variant: the running total is reduced modulo `c` at every step
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
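# Added sketch (not part of the original file): exercising both variants, using
# the names as defined above.
if __name__ == "__main__":
    assert lowerCamelCase__(3, 4) == 12
    assert lowerCamelCase_mod__(3, 4, 5) == 2  # (3 * 4) % 5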